Dataset schema (column: type, reported value-length range):

repository_name: string (lengths 7 to 55)
func_path_in_repository: string (lengths 4 to 223)
func_name: string (lengths 1 to 134)
whole_func_string: string (lengths 75 to 104k)
language: string (1 distinct value)
func_code_string: string (lengths 75 to 104k)
func_code_tokens: sequence (lengths 19 to 28.4k)
func_documentation_string: string (lengths 1 to 46.9k)
func_documentation_tokens: sequence (lengths 1 to 1.97k)
split_name: string (1 distinct value)
func_code_url: string (lengths 87 to 315)
kmedian/korr
korr/find_unrelated.py
find_unrelated
python
import scipy.stats


def find_unrelated(x, plim=0.1, axis=0):
    """Find indices of insignificant un-/correlated variables

    Example:
    --------
        pairs = find_unrelated(x, plim)
    """
    # transpose if axis != 0
    if axis != 0:
        x = x.T
    # read dimensions and allocate variables
    _, c = x.shape
    pairs = []
    # compute each (i, j)-th correlation and keep insignificant pairs
    for i in range(0, c):
        for j in range(i + 1, c):
            _, p = scipy.stats.pearsonr(x[:, i], x[:, j])
            if p > plim:
                pairs.append((i, j))
    # done
    return tuple(pairs)
[ "def", "find_unrelated", "(", "x", ",", "plim", "=", "0.1", ",", "axis", "=", "0", ")", ":", "# transpose if axis<>0", "if", "axis", "is", "not", "0", ":", "x", "=", "x", ".", "T", "# read dimensions and allocate variables", "_", ",", "c", "=", "x", ".", "shape", "pairs", "=", "[", "]", "# compute each (i,j)-th correlation", "for", "i", "in", "range", "(", "0", ",", "c", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "c", ")", ":", "_", ",", "p", "=", "scipy", ".", "stats", ".", "pearsonr", "(", "x", "[", ":", ",", "i", "]", ",", "x", "[", ":", ",", "j", "]", ")", "if", "p", ">", "plim", ":", "pairs", ".", "append", "(", "(", "i", ",", "j", ")", ")", "# done", "return", "tuple", "(", "pairs", ")" ]
Find indices of insignificant un-/correlated variables Example: -------- pairs = find_unrelated(x, plim)
[ "Find", "indicies", "of", "insignificant", "un", "-", "/", "correlated", "variables" ]
train
https://github.com/kmedian/korr/blob/4eb86fc14b1fc1b69204069b7753d115b327c937/korr/find_unrelated.py#L5-L28
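A quick usage sketch (illustration only; it assumes numpy/scipy are installed and that korr exports find_unrelated at the package level):

import numpy as np
from korr import find_unrelated  # assumed top-level export

rng = np.random.RandomState(42)
x = rng.randn(100, 5)                      # 100 observations, 5 variables
x[:, 1] = x[:, 0] + 0.01 * rng.randn(100)  # make columns 0 and 1 strongly correlated

pairs = find_unrelated(x, plim=0.1)        # index pairs with insignificant correlation
print(pairs)                               # e.g. ((0, 2), (0, 3), ...); (0, 1) should not appear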
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/connectivity_runner.py
ConnectivityRunner.apply_connectivity_changes
python
def apply_connectivity_changes(self, request):
    """ Handle apply connectivity changes request json, trigger add or remove vlan methods,
    get response from them and create json response

    :param request: json with all required actions to configure or remove vlans from certain port
    :return Serialized DriverResponseRoot to json
    :rtype json
    """
    if request is None or request == "":
        raise Exception(self.__class__.__name__, "request is None or empty")

    holder = JsonRequestDeserializer(jsonpickle.decode(request))
    if not holder or not hasattr(holder, "driverRequest"):
        raise Exception(self.__class__.__name__, "Deserialized request is None or empty")

    driver_response = DriverResponse()
    add_vlan_thread_list = []
    remove_vlan_thread_list = []
    driver_response_root = DriverResponseRoot()

    for action in holder.driverRequest.actions:
        self._logger.info("Action: {}".format(action.__dict__))
        self._validate_request_action(action)

        action_id = action.actionId
        full_name = action.actionTarget.fullName
        port_mode = action.connectionParams.mode.lower()

        if action.type == "setVlan":
            qnq = False
            ctag = ""
            for attribute in action.connectionParams.vlanServiceAttributes:
                if attribute.attributeName.lower() == "qnq" and attribute.attributeValue.lower() == "true":
                    qnq = True
                if attribute.attributeName.lower() == "ctag":
                    ctag = attribute.attributeValue
            for vlan_id in self._get_vlan_list(action.connectionParams.vlanId):
                add_vlan_thread = Thread(target=self.add_vlan, name=action_id,
                                         args=(vlan_id, full_name, port_mode, qnq, ctag))
                add_vlan_thread_list.append(add_vlan_thread)
        elif action.type == "removeVlan":
            for vlan_id in self._get_vlan_list(action.connectionParams.vlanId):
                remove_vlan_thread = Thread(target=self.remove_vlan, name=action_id,
                                            args=(vlan_id, full_name, port_mode))
                remove_vlan_thread_list.append(remove_vlan_thread)
        else:
            self._logger.warning("Undefined action type determined '{}': {}".format(
                action.type, action.__dict__))
            continue

    # Start all created remove_vlan threads
    for thread in remove_vlan_thread_list:
        thread.start()

    # Join all remove_vlan threads. Main thread waits for completion of all of them
    for thread in remove_vlan_thread_list:
        thread.join()

    # Start all created add_vlan threads
    for thread in add_vlan_thread_list:
        thread.start()

    # Join all add_vlan threads. Main thread waits for completion of all of them
    for thread in add_vlan_thread_list:
        thread.join()

    request_result = []
    for action in holder.driverRequest.actions:
        result_statuses, message = zip(*self.result.get(action.actionId))
        if all(result_statuses):
            action_result = ConnectivitySuccessResponse(
                action, "Add Vlan {vlan} configuration successfully completed".format(
                    vlan=action.connectionParams.vlanId))
        else:
            message_details = "\n\t".join(message)
            action_result = ConnectivityErrorResponse(
                action, "Add Vlan {vlan} configuration failed."
                        "\nAdd Vlan configuration details:\n{message_details}".format(
                            vlan=action.connectionParams.vlanId,
                            message_details=message_details))
        request_result.append(action_result)

    driver_response.actionResults = request_result
    driver_response_root.driverResponse = driver_response
    return serialize_to_json(driver_response_root)
[ "def", "apply_connectivity_changes", "(", "self", ",", "request", ")", ":", "if", "request", "is", "None", "or", "request", "==", "\"\"", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"request is None or empty\"", ")", "holder", "=", "JsonRequestDeserializer", "(", "jsonpickle", ".", "decode", "(", "request", ")", ")", "if", "not", "holder", "or", "not", "hasattr", "(", "holder", ",", "\"driverRequest\"", ")", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"Deserialized request is None or empty\"", ")", "driver_response", "=", "DriverResponse", "(", ")", "add_vlan_thread_list", "=", "[", "]", "remove_vlan_thread_list", "=", "[", "]", "driver_response_root", "=", "DriverResponseRoot", "(", ")", "for", "action", "in", "holder", ".", "driverRequest", ".", "actions", ":", "self", ".", "_logger", ".", "info", "(", "\"Action: \"", ",", "action", ".", "__dict__", ")", "self", ".", "_validate_request_action", "(", "action", ")", "action_id", "=", "action", ".", "actionId", "full_name", "=", "action", ".", "actionTarget", ".", "fullName", "port_mode", "=", "action", ".", "connectionParams", ".", "mode", ".", "lower", "(", ")", "if", "action", ".", "type", "==", "\"setVlan\"", ":", "qnq", "=", "False", "ctag", "=", "\"\"", "for", "attribute", "in", "action", ".", "connectionParams", ".", "vlanServiceAttributes", ":", "if", "attribute", ".", "attributeName", ".", "lower", "(", ")", "==", "\"qnq\"", "and", "attribute", ".", "attributeValue", ".", "lower", "(", ")", "==", "\"true\"", ":", "qnq", "=", "True", "if", "attribute", ".", "attributeName", ".", "lower", "(", ")", "==", "\"ctag\"", ":", "ctag", "=", "attribute", ".", "attributeValue", "for", "vlan_id", "in", "self", ".", "_get_vlan_list", "(", "action", ".", "connectionParams", ".", "vlanId", ")", ":", "add_vlan_thread", "=", "Thread", "(", "target", "=", "self", ".", "add_vlan", ",", "name", "=", "action_id", ",", "args", "=", "(", "vlan_id", ",", "full_name", ",", "port_mode", ",", "qnq", ",", "ctag", ")", ")", "add_vlan_thread_list", ".", "append", "(", "add_vlan_thread", ")", "elif", "action", ".", "type", "==", "\"removeVlan\"", ":", "for", "vlan_id", "in", "self", ".", "_get_vlan_list", "(", "action", ".", "connectionParams", ".", "vlanId", ")", ":", "remove_vlan_thread", "=", "Thread", "(", "target", "=", "self", ".", "remove_vlan", ",", "name", "=", "action_id", ",", "args", "=", "(", "vlan_id", ",", "full_name", ",", "port_mode", ",", ")", ")", "remove_vlan_thread_list", ".", "append", "(", "remove_vlan_thread", ")", "else", ":", "self", ".", "_logger", ".", "warning", "(", "\"Undefined action type determined '{}': {}\"", ".", "format", "(", "action", ".", "type", ",", "action", ".", "__dict__", ")", ")", "continue", "# Start all created remove_vlan_threads", "for", "thread", "in", "remove_vlan_thread_list", ":", "thread", ".", "start", "(", ")", "# Join all remove_vlan_threads. Main thread will wait completion of all remove_vlan_thread", "for", "thread", "in", "remove_vlan_thread_list", ":", "thread", ".", "join", "(", ")", "# Start all created add_vlan_threads", "for", "thread", "in", "add_vlan_thread_list", ":", "thread", ".", "start", "(", ")", "# Join all add_vlan_threads. 
Main thread will wait completion of all add_vlan_thread", "for", "thread", "in", "add_vlan_thread_list", ":", "thread", ".", "join", "(", ")", "request_result", "=", "[", "]", "for", "action", "in", "holder", ".", "driverRequest", ".", "actions", ":", "result_statuses", ",", "message", "=", "zip", "(", "*", "self", ".", "result", ".", "get", "(", "action", ".", "actionId", ")", ")", "if", "all", "(", "result_statuses", ")", ":", "action_result", "=", "ConnectivitySuccessResponse", "(", "action", ",", "\"Add Vlan {vlan} configuration successfully completed\"", ".", "format", "(", "vlan", "=", "action", ".", "connectionParams", ".", "vlanId", ")", ")", "else", ":", "message_details", "=", "\"\\n\\t\"", ".", "join", "(", "message", ")", "action_result", "=", "ConnectivityErrorResponse", "(", "action", ",", "\"Add Vlan {vlan} configuration failed.\"", "\"\\nAdd Vlan configuration details:\\n{message_details}\"", ".", "format", "(", "vlan", "=", "action", ".", "connectionParams", ".", "vlanId", ",", "message_details", "=", "message_details", ")", ")", "request_result", ".", "append", "(", "action_result", ")", "driver_response", ".", "actionResults", "=", "request_result", "driver_response_root", ".", "driverResponse", "=", "driver_response", "return", "serialize_to_json", "(", "driver_response_root", ")" ]
Handle apply connectivity changes request json, trigger add or remove vlan methods, get response from them and create json response :param request: json with all required actions to configure or remove vlans from certain port :return Serialized DriverResponseRoot to json :rtype json
[ "Handle", "apply", "connectivity", "changes", "request", "json", "trigger", "add", "or", "remove", "vlan", "methods", "get", "responce", "from", "them", "and", "create", "json", "response" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/connectivity_runner.py#L58-L145
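For orientation, a minimal payload sketch assembled only from the fields the runner actually reads (driverRequest.actions, actionId, type, actionTarget.fullName, connectionParams and vlanServiceAttributes); the concrete values are illustrative, not taken from a real CloudShell request:

import json

request = json.dumps({
    "driverRequest": {
        "actions": [{
            "actionId": "action-1",                # illustrative id
            "type": "setVlan",                     # or "removeVlan"
            "actionTarget": {"fullName": "2950/Chassis 0/FastEthernet0-23"},
            "connectionParams": {
                "mode": "Access",                  # lower-cased to access/trunk
                "vlanId": "100-102",               # parsed by _get_vlan_list
                "vlanServiceAttributes": [
                    {"attributeName": "QnQ", "attributeValue": "False"},
                    {"attributeName": "CTag", "attributeValue": ""},
                ],
            },
        }]
    }
})
# response_json = runner.apply_connectivity_changes(request)  # runner: a configured ConnectivityRunner subclass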
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/connectivity_runner.py
ConnectivityRunner._validate_request_action
python
def _validate_request_action(self, action):
    """ Validate action from the request json,
    according to APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST
    """
    is_fail = False
    fail_attribute = ""
    for class_attribute in self.APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST:
        if isinstance(class_attribute, tuple):
            if not hasattr(action, class_attribute[0]):
                is_fail = True
                fail_attribute = class_attribute[0]
            # dereference the parent attribute only when it exists
            elif not hasattr(getattr(action, class_attribute[0]), class_attribute[1]):
                is_fail = True
                fail_attribute = class_attribute[1]
        else:
            if not hasattr(action, class_attribute):
                is_fail = True
                fail_attribute = class_attribute

    if is_fail:
        raise Exception(self.__class__.__name__,
                        "Mandatory field {0} is missing in ApplyConnectivityChanges request json".format(
                            fail_attribute))
[ "def", "_validate_request_action", "(", "self", ",", "action", ")", ":", "is_fail", "=", "False", "fail_attribute", "=", "\"\"", "for", "class_attribute", "in", "self", ".", "APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST", ":", "if", "type", "(", "class_attribute", ")", "is", "tuple", ":", "if", "not", "hasattr", "(", "action", ",", "class_attribute", "[", "0", "]", ")", ":", "is_fail", "=", "True", "fail_attribute", "=", "class_attribute", "[", "0", "]", "if", "not", "hasattr", "(", "getattr", "(", "action", ",", "class_attribute", "[", "0", "]", ")", ",", "class_attribute", "[", "1", "]", ")", ":", "is_fail", "=", "True", "fail_attribute", "=", "class_attribute", "[", "1", "]", "else", ":", "if", "not", "hasattr", "(", "action", ",", "class_attribute", ")", ":", "is_fail", "=", "True", "fail_attribute", "=", "class_attribute", "if", "is_fail", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"Mandatory field {0} is missing in ApplyConnectivityChanges request json\"", ".", "format", "(", "fail_attribute", ")", ")" ]
Validate action from the request json, according to APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST
[ "Validate", "action", "from", "the", "request", "json", "according", "to", "APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/connectivity_runner.py#L147-L169
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/connectivity_runner.py
ConnectivityRunner._get_vlan_list
python
def _get_vlan_list(self, vlan_str):
    """ Get VLAN list from input string

    :param vlan_str: comma-separated VLAN numbers and/or ranges, e.g. "10,12-14"
    :return: list of VLANs as strings
    :raises Exception: on an invalid VLAN number or range
    """
    result = set()
    for splitted_vlan in vlan_str.split(","):
        if "-" not in splitted_vlan:
            if validate_vlan_number(splitted_vlan):
                result.add(int(splitted_vlan))
            else:
                raise Exception(self.__class__.__name__,
                                "Wrong VLAN number detected {}".format(splitted_vlan))
        else:
            if self.IS_VLAN_RANGE_SUPPORTED:
                if validate_vlan_range(splitted_vlan):
                    result.add(splitted_vlan)
                else:
                    raise Exception(self.__class__.__name__,
                                    "Wrong VLANs range detected {}".format(vlan_str))
            else:
                start, end = map(int, splitted_vlan.split("-"))
                if validate_vlan_number(start) and validate_vlan_number(end):
                    if start > end:
                        start, end = end, start
                    for vlan in range(start, end + 1):
                        result.add(vlan)
                else:
                    raise Exception(self.__class__.__name__,
                                    "Wrong VLANs range detected {}".format(vlan_str))
    return list(map(str, result))
[ "def", "_get_vlan_list", "(", "self", ",", "vlan_str", ")", ":", "result", "=", "set", "(", ")", "for", "splitted_vlan", "in", "vlan_str", ".", "split", "(", "\",\"", ")", ":", "if", "\"-\"", "not", "in", "splitted_vlan", ":", "if", "validate_vlan_number", "(", "splitted_vlan", ")", ":", "result", ".", "add", "(", "int", "(", "splitted_vlan", ")", ")", "else", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"Wrong VLAN number detected {}\"", ".", "format", "(", "splitted_vlan", ")", ")", "else", ":", "if", "self", ".", "IS_VLAN_RANGE_SUPPORTED", ":", "if", "validate_vlan_range", "(", "splitted_vlan", ")", ":", "result", ".", "add", "(", "splitted_vlan", ")", "else", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"Wrong VLANs range detected {}\"", ".", "format", "(", "vlan_str", ")", ")", "else", ":", "start", ",", "end", "=", "map", "(", "int", ",", "splitted_vlan", ".", "split", "(", "\"-\"", ")", ")", "if", "validate_vlan_number", "(", "start", ")", "and", "validate_vlan_number", "(", "end", ")", ":", "if", "start", ">", "end", ":", "start", ",", "end", "=", "end", ",", "start", "for", "vlan", "in", "range", "(", "start", ",", "end", "+", "1", ")", ":", "result", ".", "add", "(", "vlan", ")", "else", ":", "raise", "Exception", "(", "self", ".", "__class__", ".", "__name__", ",", "\"Wrong VLANs range detected {}\"", ".", "format", "(", "vlan_str", ")", ")", "return", "map", "(", "str", ",", "list", "(", "result", ")", ")" ]
Get VLAN list from input string :param vlan_str: comma-separated VLAN numbers and/or ranges, e.g. "10,12-14" :return: list of VLANs as strings
[ "Get", "VLAN", "list", "from", "input", "string" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/connectivity_runner.py#L171-L201
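A behaviour sketch for _get_vlan_list; runner stands for an instance of a concrete ConnectivityRunner subclass, and the expected results are shown as comments (ordering may vary because values pass through a set):

runner.IS_VLAN_RANGE_SUPPORTED = False
runner._get_vlan_list("10,12-14")   # -> ['10', '12', '13', '14']  (range expanded)

runner.IS_VLAN_RANGE_SUPPORTED = True
runner._get_vlan_list("10,12-14")   # -> ['10', '12-14']  (range kept as a single string)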
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/connectivity_runner.py
ConnectivityRunner.add_vlan
python
def add_vlan(self, vlan_id, full_name, port_mode, qnq, c_tag):
    """ Run flow to add VLAN(s) to interface

    :param vlan_id: Already validated number of VLAN(s)
    :param full_name: Full interface name. Example: 2950/Chassis 0/FastEthernet0-23
    :param port_mode: port mode type. Should be trunk or access
    :param qnq: QnQ (Q-in-Q) flag
    :param c_tag: C-Tag value
    """
    try:
        action_result = self.add_vlan_flow.execute_flow(vlan_range=vlan_id,
                                                        port_mode=port_mode,
                                                        port_name=full_name,
                                                        qnq=qnq,
                                                        c_tag=c_tag)
        self.result[current_thread().name].append((True, action_result))
    except Exception as e:
        self._logger.error(traceback.format_exc())
        # str(e) is portable; e.message existed only on Python 2
        self.result[current_thread().name].append((False, str(e)))
[ "def", "add_vlan", "(", "self", ",", "vlan_id", ",", "full_name", ",", "port_mode", ",", "qnq", ",", "c_tag", ")", ":", "try", ":", "action_result", "=", "self", ".", "add_vlan_flow", ".", "execute_flow", "(", "vlan_range", "=", "vlan_id", ",", "port_mode", "=", "port_mode", ",", "port_name", "=", "full_name", ",", "qnq", "=", "qnq", ",", "c_tag", "=", "c_tag", ")", "self", ".", "result", "[", "current_thread", "(", ")", ".", "name", "]", ".", "append", "(", "(", "True", ",", "action_result", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "error", "(", "traceback", ".", "format_exc", "(", ")", ")", "self", ".", "result", "[", "current_thread", "(", ")", ".", "name", "]", ".", "append", "(", "(", "False", ",", "e", ".", "message", ")", ")" ]
Run flow to add VLAN(s) to interface :param vlan_id: Already validated number of VLAN(s) :param full_name: Full interface name. Example: 2950/Chassis 0/FastEthernet0-23 :param port_mode: port mode type. Should be trunk or access :param qnq: QnQ (Q-in-Q) flag :param c_tag: C-Tag value
[ "Run", "flow", "to", "add", "VLAN", "(", "s", ")", "to", "interface" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/connectivity_runner.py#L203-L222
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/connectivity_runner.py
ConnectivityRunner.remove_vlan
python
def remove_vlan(self, vlan_id, full_name, port_mode):
    """ Run flow to remove VLAN(s) from interface

    :param vlan_id: Already validated number of VLAN(s)
    :param full_name: Full interface name. Example: 2950/Chassis 0/FastEthernet0-23
    :param port_mode: port mode type. Should be trunk or access
    """
    try:
        action_result = self.remove_vlan_flow.execute_flow(vlan_range=vlan_id,
                                                           port_name=full_name,
                                                           port_mode=port_mode)
        self.result[current_thread().name].append((True, action_result))
    except Exception as e:
        self._logger.error(traceback.format_exc())
        # str(e) is portable; e.message existed only on Python 2
        self.result[current_thread().name].append((False, str(e)))
[ "def", "remove_vlan", "(", "self", ",", "vlan_id", ",", "full_name", ",", "port_mode", ")", ":", "try", ":", "action_result", "=", "self", ".", "remove_vlan_flow", ".", "execute_flow", "(", "vlan_range", "=", "vlan_id", ",", "port_name", "=", "full_name", ",", "port_mode", "=", "port_mode", ")", "self", ".", "result", "[", "current_thread", "(", ")", ".", "name", "]", ".", "append", "(", "(", "True", ",", "action_result", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "error", "(", "traceback", ".", "format_exc", "(", ")", ")", "self", ".", "result", "[", "current_thread", "(", ")", ".", "name", "]", ".", "append", "(", "(", "False", ",", "e", ".", "message", ")", ")" ]
Run flow to remove VLAN(s) from interface :param vlan_id: Already validated number of VLAN(s) :param full_name: Full interface name. Example: 2950/Chassis 0/FastEthernet0-23 :param port_mode: port mode type. Should be trunk or access
[ "Run", "flow", "to", "remove", "VLAN", "(", "s", ")", "from", "interface", ":", "param", "vlan_id", ":", "Already", "validated", "number", "of", "VLAN", "(", "s", ")", ":", "param", "full_name", ":", "Full", "interface", "name", ".", "Example", ":", "2950", "/", "Chassis", "0", "/", "FastEthernet0", "-", "23", ":", "param", "port_mode", ":", "port", "mode", "type", ".", "Should", "be", "trunk", "or", "access" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/connectivity_runner.py#L224-L240
eumis/pyviews
pyviews/rendering/views.py
render_view
python
def render_view(view_name, **args):
    '''Process view and return root Node'''
    try:
        root_xml = get_view_root(view_name)
        return render(root_xml, **args)
    except CoreError as error:
        error.add_view_info(ViewInfo(view_name, None))
        raise
    except:
        info = exc_info()
        error = ViewError('Unknown error occurred during rendering', ViewInfo(view_name, None))
        error.add_cause(info[1])
        raise error from info[1]
[ "def", "render_view", "(", "view_name", ",", "*", "*", "args", ")", ":", "try", ":", "root_xml", "=", "get_view_root", "(", "view_name", ")", "return", "render", "(", "root_xml", ",", "*", "*", "args", ")", "except", "CoreError", "as", "error", ":", "error", ".", "add_view_info", "(", "ViewInfo", "(", "view_name", ",", "None", ")", ")", "raise", "except", ":", "info", "=", "exc_info", "(", ")", "error", "=", "ViewError", "(", "'Unknown error occured during rendering'", ",", "ViewInfo", "(", "view_name", ",", "None", ")", ")", "error", ".", "add_cause", "(", "info", "[", "1", "]", ")", "raise", "error", "from", "info", "[", "1", "]" ]
Process view and return root Node
[ "Process", "view", "and", "return", "root", "Node" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/views.py#L13-L25
eumis/pyviews
pyviews/rendering/views.py
get_view_root
python
def get_view_root(view_name: str) -> XmlNode:
    '''Parses xml file and returns root XmlNode'''
    try:
        path = join(deps.views_folder, '{0}.{1}'.format(view_name, deps.view_ext))
        parser = Parser()
        if path not in _XML_CACHE:
            with open(path, 'rb') as xml_file:
                _XML_CACHE[path] = parser.parse(xml_file, view_name)
        return _XML_CACHE[path]
    except FileNotFoundError as error:
        error = ViewError('View is not found')
        error.add_info('View name', view_name)
        error.add_info('Path', path)
        raise error
    except CoreError as error:
        error.add_view_info(ViewInfo(view_name, None))
        raise
    except:
        info = exc_info()
        error = ViewError('Unknown error occurred during parsing xml',
                          ViewInfo(view_name, None))
        error.add_cause(info[1])
        raise error from info[1]
[ "def", "get_view_root", "(", "view_name", ":", "str", ")", "->", "XmlNode", ":", "try", ":", "path", "=", "join", "(", "deps", ".", "views_folder", ",", "'{0}.{1}'", ".", "format", "(", "view_name", ",", "deps", ".", "view_ext", ")", ")", "parser", "=", "Parser", "(", ")", "if", "path", "not", "in", "_XML_CACHE", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "xml_file", ":", "_XML_CACHE", "[", "path", "]", "=", "parser", ".", "parse", "(", "xml_file", ",", "view_name", ")", "return", "_XML_CACHE", "[", "path", "]", "except", "FileNotFoundError", "as", "error", ":", "error", "=", "ViewError", "(", "'View is not found'", ")", "error", ".", "add_info", "(", "'View name'", ",", "view_name", ")", "error", ".", "add_info", "(", "'Path'", ",", "path", ")", "raise", "error", "except", "CoreError", "as", "error", ":", "error", ".", "add_view_info", "(", "ViewInfo", "(", "view_name", ",", "None", ")", ")", "raise", "except", ":", "info", "=", "exc_info", "(", ")", "error", "=", "ViewError", "(", "'Unknown error occured during parsing xml'", ",", "ViewInfo", "(", "view_name", ",", "None", ")", ")", "error", ".", "add_cause", "(", "info", "[", "1", "]", ")", "raise", "error", "from", "info", "[", "1", "]" ]
Parses xml file and returns root XmlNode
[ "Parses", "xml", "file", "and", "return", "root", "XmlNode" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/views.py#L29-L50
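A usage sketch for the two view helpers above; configuring deps directly like this is only for illustration (real applications wire views_folder/view_ext through pyviews setup), and the import paths are assumed:

from pyviews.rendering.views import render_view, get_view_root, deps  # deps import assumed

deps.views_folder = 'views'              # XML views live in ./views
deps.view_ext = 'xml'                    # so 'app' resolves to views/app.xml

root_xml = get_view_root('app')          # parsed once and stored in _XML_CACHE
assert get_view_root('app') is root_xml  # second call returns the cached XmlNode

root_node = render_view('app')           # parse (or reuse the cache) and render the root Node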
eumis/pyviews
pyviews/rendering/modifiers.py
import_global
python
def import_global(node: Node, key: str, path: Any):
    """Imports passed module, class or function by full name
    and stores it in node's globals"""
    node.node_globals[key] = import_path(path)
[ "def", "import_global", "(", "node", ":", "Node", ",", "key", ":", "str", ",", "path", ":", "Any", ")", ":", "node", ".", "node_globals", "[", "key", "]", "=", "import_path", "(", "path", ")" ]
Imports passed module, class or function by full name and stores it in node's globals
[ "Import", "passed", "module", "class", "function", "full", "name", "and", "stores", "it", "to", "node", "s", "globals" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/modifiers.py#L8-L10
eumis/pyviews
pyviews/rendering/modifiers.py
inject_global
python
def inject_global(node: Node, global_key: str, inject_key: Any):
    """Resolves passed dependency and stores it in node's globals"""
    value = get_current_scope().container.get(inject_key)
    set_global(node, global_key, value)
[ "def", "inject_global", "(", "node", ":", "Node", ",", "global_key", ":", "str", ",", "inject_key", ":", "Any", ")", ":", "value", "=", "get_current_scope", "(", ")", ".", "container", ".", "get", "(", "inject_key", ")", "set_global", "(", "node", ",", "global_key", ",", "value", ")" ]
Resolves passed dependency and stores it in node's globals
[ "Resolves", "passed", "dependency", "and", "stores", "it", "to", "node", "s", "globals" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/modifiers.py#L13-L16
eumis/pyviews
pyviews/rendering/modifiers.py
set_global
python
def set_global(node: Node, key: str, value: Any):
    """Adds passed value to node's globals"""
    node.node_globals[key] = value
[ "def", "set_global", "(", "node", ":", "Node", ",", "key", ":", "str", ",", "value", ":", "Any", ")", ":", "node", ".", "node_globals", "[", "key", "]", "=", "value" ]
Adds passed value to node's globals
[ "Adds", "passed", "value", "to", "node", "s", "globals" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/modifiers.py#L19-L21
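A small sketch of the modifiers above; the SimpleNamespace node is a stand-in for a real pyviews Node, which works here because the modifiers only touch node_globals:

from types import SimpleNamespace
from pyviews.rendering.modifiers import import_global, set_global

node = SimpleNamespace(node_globals={})       # stand-in node

set_global(node, 'title', 'Hello')            # node.node_globals['title'] == 'Hello'
import_global(node, 'sqrt', 'math.sqrt')      # node.node_globals['sqrt'] is math.sqrt
# inject_global additionally needs a configured dependency scope, so it is omitted here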
eumis/pyviews
pyviews/rendering/modifiers.py
call
python
def call(node: Node, key: str, value: Any):
    """Calls node or node instance method"""
    value = _to_list(value)
    if not value or not isinstance(value[-1], dict):
        value.append({})
    args = value[0:-1]
    kwargs = value[-1]
    node.__dict__[key](*args, **kwargs)
[ "def", "call", "(", "node", ":", "Node", ",", "key", ":", "str", ",", "value", ":", "Any", ")", ":", "value", "=", "_to_list", "(", "value", ")", "if", "not", "value", "or", "not", "isinstance", "(", "value", "[", "-", "1", "]", ",", "dict", ")", ":", "value", ".", "append", "(", "{", "}", ")", "args", "=", "value", "[", "0", ":", "-", "1", "]", "kwargs", "=", "value", "[", "-", "1", "]", "node", ".", "__dict__", "[", "key", "]", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Calls node or node instance method
[ "Calls", "node", "or", "node", "instance", "method" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/modifiers.py#L24-L31
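The value convention handled by call is a list of positional arguments, optionally ending with a dict of keyword arguments; note the callable is looked up on the instance via node.__dict__. A hypothetical illustration (assuming _to_list passes lists through unchanged):

from types import SimpleNamespace
from pyviews.rendering.modifiers import call

node = SimpleNamespace(open=lambda path, mode='r': print('open', path, mode),
                       refresh=lambda: print('refresh'))

call(node, 'open', ['file.txt'])                  # node.__dict__['open']('file.txt')
call(node, 'open', ['file.txt', {'mode': 'rb'}])  # trailing dict becomes keyword args
call(node, 'refresh', [])                         # no args, no kwargs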
upsight/doctor
doctor/docs/flask.py
AutoFlaskHarness.iter_annotations
python
def iter_annotations(self):
    """Yield a tuple for each Flask handler containing annotated methods.

    Each tuple contains a heading, routing rule, the view class
    associated with the rule, and the annotations for the methods
    in that class.
    """
    # Need to store a list of route, view_class, and annotations by a
    # section key so that all methods of a resource are kept together in
    # the documentation. The key of the section will be the heading that
    # the route documentation goes under.
    section_map = defaultdict(list)
    for rule in self.app.url_map.iter_rules():
        if rule.endpoint == 'static':
            # Don't document static file endpoints.
            continue
        # This gives us the auto-generated view function.
        view_function = self.app.view_functions.get(rule.endpoint)
        if view_function is None:
            continue
        # This gives us the actual Flask resource class.
        view_class = getattr(view_function, 'view_class', None)
        if view_class is None:
            continue
        annotations = []
        for method_name in HTTP_METHODS:
            method = getattr(view_class, method_name, None)
            if not method:
                continue
            annotation = ResourceAnnotation(
                method, method_name, method._doctor_title)
            annotations.append(annotation)
        if annotations:
            heading = self._get_annotation_heading(view_class, str(rule))
            section_map[heading].append((rule, view_class, annotations))

    # Loop through each heading and its items and yield the values.
    for heading in sorted(section_map.keys()):
        for item in section_map[heading]:
            rule, view_class, annotations = item
            yield (heading, rule, view_class, annotations)
[ "def", "iter_annotations", "(", "self", ")", ":", "# Need to store a list of route, view_class, and annotations by a", "# section key so that all methods of a resource are kept together in", "# the documentation. The key of the section will be the heading that", "# the route documentation goes under.", "section_map", "=", "defaultdict", "(", "list", ")", "for", "rule", "in", "self", ".", "app", ".", "url_map", ".", "iter_rules", "(", ")", ":", "if", "rule", ".", "endpoint", "==", "'static'", ":", "# Don't document static file endpoints.", "continue", "# This gives us the auto-generated view function.", "view_function", "=", "self", ".", "app", ".", "view_functions", ".", "get", "(", "rule", ".", "endpoint", ")", "if", "view_function", "is", "None", ":", "continue", "# This gives us the actual Flask resource class.", "view_class", "=", "getattr", "(", "view_function", ",", "'view_class'", ",", "None", ")", "if", "view_class", "is", "None", ":", "continue", "annotations", "=", "[", "]", "for", "method_name", "in", "HTTP_METHODS", ":", "method", "=", "getattr", "(", "view_class", ",", "method_name", ",", "None", ")", "if", "not", "method", ":", "continue", "annotation", "=", "ResourceAnnotation", "(", "method", ",", "method_name", ",", "method", ".", "_doctor_title", ")", "annotations", ".", "append", "(", "annotation", ")", "if", "annotations", ":", "heading", "=", "self", ".", "_get_annotation_heading", "(", "view_class", ",", "str", "(", "rule", ")", ")", "section_map", "[", "heading", "]", ".", "append", "(", "(", "rule", ",", "view_class", ",", "annotations", ")", ")", "# Loop through each heading and it's items and yield the values.", "for", "heading", "in", "sorted", "(", "section_map", ".", "keys", "(", ")", ")", ":", "for", "item", "in", "section_map", "[", "heading", "]", ":", "rule", ",", "view_class", ",", "annotations", "=", "item", "yield", "(", "heading", ",", "rule", ",", "view_class", ",", "annotations", ")" ]
Yield a tuple for each Flask handler containing annotated methods. Each tuple contains a heading, routing rule, the view class associated with the rule, and the annotations for the methods in that class.
[ "Yield", "a", "tuple", "for", "each", "Flask", "handler", "containing", "annotated", "methods", "." ]
train
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/flask.py#L41-L81
upsight/doctor
doctor/docs/flask.py
AutoFlaskHarness.request
python
def request(self, rule, view_class, annotation):
    """Make a request against the app.

    This attempts to use the schema to replace any url params in the
    path pattern. If there are any unused parameters in the schema,
    after substituting the ones in the path, they will be sent as query
    string parameters or form parameters. The substituted values are
    taken from the "example" value in the schema.

    Returns a dict with the following keys:

    - **url** -- Example URL, with url_prefix added to the path
      pattern, and the example values substituted in for URL params.
    - **method** -- HTTP request method (e.g. "GET").
    - **params** -- A dictionary of query string or form parameters.
    - **response** -- The text response to the request.

    :param rule: Werkzeug Rule object.
    :param view_class: View class for the annotated method.
    :param annotation: Annotation for the method to be requested.
    :type annotation: doctor.resource.ResourceAnnotation
    :returns: dict
    """
    headers = self._get_headers(rule, annotation)
    example_values = self._get_example_values(rule, annotation)
    # If any of the example values for DELETE/GET HTTP methods are dicts
    # or lists, we will need to json dump them before building the rule,
    # otherwise the query string parameter won't get parsed correctly
    # by doctor.
    if annotation.http_method.upper() in ('DELETE', 'GET'):
        for key, value in list(example_values.items()):
            if isinstance(value, (dict, list)):
                example_values[key] = json.dumps(value)
    _, path = rule.build(example_values, append_unknown=True)
    if annotation.http_method.upper() not in ('DELETE', 'GET'):
        parsed_path = parse.urlparse(path)
        path = parsed_path.path
        params = example_values
    else:
        params = {}
    method_name = annotation.http_method.lower()
    method = getattr(self.test_client, method_name)
    if method_name in ('post', 'put'):
        response = method(path, data=json.dumps(params), headers=headers,
                          content_type='application/json')
    else:
        response = method(path, data=params, headers=headers)
    return {
        'url': '/'.join([self.url_prefix, path.lstrip('/')]),
        'method': annotation.http_method.upper(),
        'params': params,
        'response': response.data,
    }
[ "def", "request", "(", "self", ",", "rule", ",", "view_class", ",", "annotation", ")", ":", "headers", "=", "self", ".", "_get_headers", "(", "rule", ",", "annotation", ")", "example_values", "=", "self", ".", "_get_example_values", "(", "rule", ",", "annotation", ")", "# If any of the example values for DELETE/GET HTTP methods are dicts", "# or lists, we will need to json dump them before building the rule,", "# otherwise the query string parameter won't get parsed correctly", "# by doctor.", "if", "annotation", ".", "http_method", ".", "upper", "(", ")", "in", "(", "'DELETE'", ",", "'GET'", ")", ":", "for", "key", ",", "value", "in", "list", "(", "example_values", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "value", ",", "(", "dict", ",", "list", ")", ")", ":", "example_values", "[", "key", "]", "=", "json", ".", "dumps", "(", "value", ")", "_", ",", "path", "=", "rule", ".", "build", "(", "example_values", ",", "append_unknown", "=", "True", ")", "if", "annotation", ".", "http_method", ".", "upper", "(", ")", "not", "in", "(", "'DELETE'", ",", "'GET'", ")", ":", "parsed_path", "=", "parse", ".", "urlparse", "(", "path", ")", "path", "=", "parsed_path", ".", "path", "params", "=", "example_values", "else", ":", "params", "=", "{", "}", "method_name", "=", "annotation", ".", "http_method", ".", "lower", "(", ")", "method", "=", "getattr", "(", "self", ".", "test_client", ",", "method_name", ")", "if", "method_name", "in", "(", "'post'", ",", "'put'", ")", ":", "response", "=", "method", "(", "path", ",", "data", "=", "json", ".", "dumps", "(", "params", ")", ",", "headers", "=", "headers", ",", "content_type", "=", "'application/json'", ")", "else", ":", "response", "=", "method", "(", "path", ",", "data", "=", "params", ",", "headers", "=", "headers", ")", "return", "{", "'url'", ":", "'/'", ".", "join", "(", "[", "self", ".", "url_prefix", ",", "path", ".", "lstrip", "(", "'/'", ")", "]", ")", ",", "'method'", ":", "annotation", ".", "http_method", ".", "upper", "(", ")", ",", "'params'", ":", "params", ",", "'response'", ":", "response", ".", "data", ",", "}" ]
Make a request against the app. This attempts to use the schema to replace any url params in the path pattern. If there are any unused parameters in the schema, after substituting the ones in the path, they will be sent as query string parameters or form parameters. The substituted values are taken from the "example" value in the schema. Returns a dict with the following keys: - **url** -- Example URL, with url_prefix added to the path pattern, and the example values substituted in for URL params. - **method** -- HTTP request method (e.g. "GET"). - **params** -- A dictionary of query string or form parameters. - **response** -- The text response to the request. :param rule: Werkzeug Rule object. :param view_class: View class for the annotated method. :param annotation: Annotation for the method to be requested. :type annotation: doctor.resource.ResourceAnnotation :returns: dict
[ "Make", "a", "request", "against", "the", "app", "." ]
train
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/flask.py#L83-L135
eumis/pyviews
pyviews/core/observable.py
Observable.observe
python
def observe(self, key: str, callback: Callable[[Any, Any], None]):
    """Subscribes to key changes"""
    if key not in self._callbacks:
        self._add_key(key)
    self._callbacks[key].append(callback)
[ "def", "observe", "(", "self", ",", "key", ":", "str", ",", "callback", ":", "Callable", "[", "[", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "if", "key", "not", "in", "self", ".", "_callbacks", ":", "self", ".", "_add_key", "(", "key", ")", "self", ".", "_callbacks", "[", "key", "]", ".", "append", "(", "callback", ")" ]
Subscribes to key changes
[ "Subscribes", "to", "key", "changes" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L12-L16
eumis/pyviews
pyviews/core/observable.py
Observable.release
python
def release(self, key: str, callback: Callable[[Any, Any], None]):
    """Releases callback from key changes"""
    try:
        self._callbacks[key].remove(callback)
    except (KeyError, ValueError):
        pass
[ "def", "release", "(", "self", ",", "key", ":", "str", ",", "callback", ":", "Callable", "[", "[", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "try", ":", "self", ".", "_callbacks", "[", "key", "]", ".", "remove", "(", "callback", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass" ]
Releases callback from key changes
[ "Releases", "callback", "from", "key", "changes" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L30-L35
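A subscription sketch for observe/release; Person is a hypothetical ObservableEntity subclass, and the callback argument order (new value first) is an assumption based on the Callable[[Any, Any], None] signature:

from pyviews.core.observable import ObservableEntity

class Person(ObservableEntity):
    def __init__(self):
        super().__init__()
        self.name = 'initial'

def on_name_changed(new_value, old_value):  # argument order assumed
    print('name:', old_value, '->', new_value)

person = Person()
person.observe('name', on_name_changed)     # subscribe
person.name = 'updated'                     # assignment notifies subscribers
person.release('name', on_name_changed)     # unsubscribe; unknown keys/callbacks are ignored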
eumis/pyviews
pyviews/core/observable.py
ObservableEntity.observe
python
def observe(self, key, callback: Callable[[Any, Any], None]):
    """Subscribes to key changes"""
    if key not in self.__dict__ and key not in self._callbacks:
        raise KeyError("Entity {0} doesn't have attribute {1}".format(self, key))
    super().observe(key, callback)
[ "def", "observe", "(", "self", ",", "key", ",", "callback", ":", "Callable", "[", "[", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "if", "key", "not", "in", "self", ".", "__dict__", "and", "key", "not", "in", "self", ".", "_callbacks", ":", "raise", "KeyError", "(", "'Entity '", "+", "str", "(", "self", ")", "+", "'doesn'", "'t have attribute'", "+", "key", ")", "super", "(", ")", ".", "observe", "(", "key", ",", "callback", ")" ]
Subscribes to key changes
[ "Subscribes", "to", "key", "changes" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L50-L54
eumis/pyviews
pyviews/core/observable.py
InheritedDict.inherit
python
def inherit(self, parent):
    """Inherit passed dictionary"""
    if self._parent == parent:
        return
    if self._parent:
        self._parent.release_all(self._parent_changed)
    self_values = {key: self._container[key] for key in self._own_keys}
    self._container = {**parent.to_dictionary(), **self_values}
    self._parent = parent
    self._parent.observe_all(self._parent_changed)
[ "def", "inherit", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "_parent", "==", "parent", ":", "return", "if", "self", ".", "_parent", ":", "self", ".", "_parent", ".", "release_all", "(", "self", ".", "_parent_changed", ")", "self_values", "=", "{", "key", ":", "self", ".", "_container", "[", "key", "]", "for", "key", "in", "self", ".", "_own_keys", "}", "self", ".", "_container", "=", "{", "*", "*", "parent", ".", "to_dictionary", "(", ")", ",", "*", "*", "self_values", "}", "self", ".", "_parent", "=", "parent", "self", ".", "_parent", ".", "observe_all", "(", "self", ".", "_parent_changed", ")" ]
Inherit passed dictionary
[ "Inherit", "passed", "dictionary" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L99-L108
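A parent/child sketch for inherit, assuming InheritedDict supports the mapping-style item access its other methods imply (self._parent[key] in remove_key below):

from pyviews.core.observable import InheritedDict

parent = InheritedDict()
parent['color'] = 'red'

child = InheritedDict()
child['size'] = 10
child.inherit(parent)        # snapshot parent values, own keys stay on top

child['color']               # -> 'red' (inherited)
child['size']                # -> 10    (own key wins over parent values)
parent['color'] = 'blue'     # propagated to child via the _parent_changed callback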
eumis/pyviews
pyviews/core/observable.py
InheritedDict.observe_all
python
def observe_all(self, callback: Callable[[str, Any, Any], None]):
    """Subscribes to all keys changes"""
    self._all_callbacks.append(callback)
[ "def", "observe_all", "(", "self", ",", "callback", ":", "Callable", "[", "[", "str", ",", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "self", ".", "_all_callbacks", ".", "append", "(", "callback", ")" ]
Subscribes to all keys changes
[ "Subscribes", "to", "all", "keys", "changes" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L110-L112
eumis/pyviews
pyviews/core/observable.py
InheritedDict.release_all
python
def release_all(self, callback: Callable[[str, Any, Any], None]):
    """Releases callback from all keys changes"""
    self._all_callbacks.remove(callback)
[ "def", "release_all", "(", "self", ",", "callback", ":", "Callable", "[", "[", "str", ",", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "self", ".", "_all_callbacks", ".", "remove", "(", "callback", ")" ]
Releases callback from all keys changes
[ "Releases", "callback", "from", "all", "keys", "changes" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L124-L126
eumis/pyviews
pyviews/core/observable.py
InheritedDict.remove_key
python
def remove_key(self, key):
    """Remove own key, value"""
    try:
        self._own_keys.discard(key)
        if self._parent and self._parent.has_key(key):
            self._container[key] = self._parent[key]
        else:
            del self._container[key]
    except KeyError:
        pass
[ "def", "remove_key", "(", "self", ",", "key", ")", ":", "try", ":", "self", ".", "_own_keys", ".", "discard", "(", "key", ")", "if", "self", ".", "_parent", "and", "self", ".", "_parent", ".", "has_key", "(", "key", ")", ":", "self", ".", "_container", "[", "key", "]", "=", "self", ".", "_parent", "[", "key", "]", "else", ":", "del", "self", ".", "_container", "[", "key", "]", "except", "KeyError", ":", "pass" ]
Remove own key, value
[ "Remove", "own", "key", "value" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L136-L145
eumis/pyviews
pyviews/core/binding.py
Binder.add_rule
python
def add_rule(self, binding_type: str, rule: BindingRule):
    """Adds new rule"""
    if binding_type not in self._rules:
        self._rules[binding_type] = []
    # newest rule goes first, so it takes precedence in find_rule
    self._rules[binding_type].insert(0, rule)
[ "def", "add_rule", "(", "self", ",", "binding_type", ":", "str", ",", "rule", ":", "BindingRule", ")", ":", "if", "binding_type", "not", "in", "self", ".", "_rules", ":", "self", ".", "_rules", "[", "binding_type", "]", "=", "[", "]", "self", ".", "_rules", "[", "binding_type", "]", ".", "insert", "(", "0", ",", "rule", ")" ]
Adds new rule
[ "Adds", "new", "rule" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/binding.py#L53-L58
eumis/pyviews
pyviews/core/binding.py
Binder.find_rule
python
def find_rule(self, binding_type: str, **args): """Finds rule by binding type and args""" try: rules = self._rules[binding_type] return next(rule for rule in rules if rule.suitable(**args)) except (KeyError, StopIteration): return None
[ "def", "find_rule", "(", "self", ",", "binding_type", ":", "str", ",", "*", "*", "args", ")", ":", "try", ":", "rules", "=", "self", ".", "_rules", "[", "binding_type", "]", "return", "next", "(", "rule", "for", "rule", "in", "rules", "if", "rule", ".", "suitable", "(", "*", "*", "args", ")", ")", "except", "(", "KeyError", ",", "StopIteration", ")", ":", "return", "None" ]
Finds rule by binding type and args
[ "Finds", "rule", "by", "binding", "type", "and", "args" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/binding.py#L60-L66
eumis/pyviews
pyviews/core/binding.py
Binder.apply
def apply(self, binding_type, **args): """Finds a suitable rule and applies the resulting binding to the node""" rule = self.find_rule(binding_type, **args) if rule is None: error = BindingError('Binding rule is not found') error.add_info('Binding type', binding_type) error.add_info('args', args) raise error binding = rule.apply(**args) if binding: args['node'].add_binding(binding)
python
def apply(self, binding_type, **args): """Finds a suitable rule and applies the resulting binding to the node""" rule = self.find_rule(binding_type, **args) if rule is None: error = BindingError('Binding rule is not found') error.add_info('Binding type', binding_type) error.add_info('args', args) raise error binding = rule.apply(**args) if binding: args['node'].add_binding(binding)
[ "def", "apply", "(", "self", ",", "binding_type", ",", "*", "*", "args", ")", ":", "rule", "=", "self", ".", "find_rule", "(", "binding_type", ",", "*", "*", "args", ")", "if", "rule", "is", "None", ":", "error", "=", "BindingError", "(", "'Binding rule is not found'", ")", "error", ".", "add_info", "(", "'Binding type'", ",", "binding_type", ")", "error", ".", "add_info", "(", "'args'", ",", "args", ")", "raise", "error", "binding", "=", "rule", ".", "apply", "(", "*", "*", "args", ")", "if", "binding", ":", "args", "[", "'node'", "]", ".", "add_binding", "(", "binding", ")" ]
Finds a suitable rule and applies the resulting binding to the node
[ "Finds", "a", "suitable", "rule", "and", "applies", "the", "resulting", "binding", "to", "the", "node" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/binding.py#L68-L78
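Taken together, add_rule/find_rule/apply form a small rule-dispatch pipeline. A sketch with a hypothetical rule class (BindingRule's exact abstract interface beyond suitable()/apply() is assumed):

from pyviews.core.binding import Binder, BindingRule

class ExprRule(BindingRule):
    """Hypothetical rule that matches whenever an expression body is passed."""
    def suitable(self, **args):
        return 'expr_body' in args
    def apply(self, **args):
        return None  # a real rule returns a Binding for node.add_binding()

binder = Binder()
binder.add_rule('oneway', ExprRule())   # insert(0, ...) means the newest rule wins find_rule()
assert isinstance(binder.find_rule('oneway', expr_body='vm.name'), ExprRule)
assert binder.find_rule('twoways') is None   # unknown type gives None, not an error
binder.apply('oneway', expr_body='vm.name', node=None)  # raises BindingError only when no rule matches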
eumis/pyviews
pyviews/binding/implementations.py
get_expression_target
def get_expression_target(expression: Expression, expr_vars: InheritedDict) -> BindingTarget: '''Factory method to create expression target''' root = expression.get_object_tree() if len(root.children) != 1 or not PROPERTY_EXPRESSION_REGEX.fullmatch(expression.code): error = BindingError('Expression should be property expression') error.add_info('Expression', expression.code) raise error if root.children[0].children: return PropertyExpressionTarget(expression, expr_vars) return GlobalValueExpressionTarget(expression, expr_vars)
python
def get_expression_target(expression: Expression, expr_vars: InheritedDict) -> BindingTarget: '''Factory method to create expression target''' root = expression.get_object_tree() if len(root.children) != 1 or not PROPERTY_EXPRESSION_REGEX.fullmatch(expression.code): error = BindingError('Expression should be property expression') error.add_info('Expression', expression.code) raise error if root.children[0].children: return PropertyExpressionTarget(expression, expr_vars) return GlobalValueExpressionTarget(expression, expr_vars)
[ "def", "get_expression_target", "(", "expression", ":", "Expression", ",", "expr_vars", ":", "InheritedDict", ")", "->", "BindingTarget", ":", "root", "=", "expression", ".", "get_object_tree", "(", ")", "if", "len", "(", "root", ".", "children", ")", "!=", "1", "or", "not", "PROPERTY_EXPRESSION_REGEX", ".", "fullmatch", "(", "expression", ".", "code", ")", ":", "error", "=", "BindingError", "(", "'Expression should be property expression'", ")", "error", ".", "add_info", "(", "'Expression'", ",", "expression", ".", "code", ")", "raise", "error", "if", "root", ".", "children", "[", "0", "]", ".", "children", ":", "return", "PropertyExpressionTarget", "(", "expression", ",", "expr_vars", ")", "return", "GlobalValueExpressionTarget", "(", "expression", ",", "expr_vars", ")" ]
Factory method to create expression target
[ "Factory", "method", "to", "create", "expression", "target" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/binding/implementations.py#L219-L228
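In other words: a dotted property expression such as 'vm.name' yields a PropertyExpressionTarget, a bare global name such as 'name' yields a GlobalValueExpressionTarget, and anything else raises BindingError. A hedged sketch (import paths and the use of CompiledExpression as the Expression implementation are assumptions based on these records):

from pyviews.binding.implementations import get_expression_target
from pyviews.compilation.expression import CompiledExpression
from pyviews.core.observable import InheritedDict

expr_vars = InheritedDict()
get_expression_target(CompiledExpression('vm.name'), expr_vars)        # property target
get_expression_target(CompiledExpression('name'), expr_vars)           # global value target
get_expression_target(CompiledExpression('vm.name + "!"'), expr_vars)  # raises BindingError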
eumis/pyviews
pyviews/binding/implementations.py
PropertyTarget.on_change
def on_change(self, value): '''Calls modifier on instance with passed value''' self._modifier(self.inst, self.prop, value)
python
def on_change(self, value): '''Calls modifier on instance with passed value''' self._modifier(self.inst, self.prop, value)
[ "def", "on_change", "(", "self", ",", "value", ")", ":", "self", ".", "_modifier", "(", "self", ".", "inst", ",", "self", ".", "prop", ",", "value", ")" ]
Calls modifier on instance with passed value
[ "Calls", "modifier", "on", "instance", "with", "passed", "value" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/binding/implementations.py#L18-L20
eumis/pyviews
pyviews/binding/implementations.py
Dependency.destroy
def destroy(self): '''Unsubscribes callback from observable''' self._observable.release(self._key, self._callback) self._observable = None self._key = None self._callback = None
python
def destroy(self): '''Unsubscribes callback from observable''' self._observable.release(self._key, self._callback) self._observable = None self._key = None self._callback = None
[ "def", "destroy", "(", "self", ")", ":", "self", ".", "_observable", ".", "release", "(", "self", ".", "_key", ",", "self", ".", "_callback", ")", "self", ".", "_observable", "=", "None", "self", ".", "_key", "=", "None", "self", ".", "_callback", "=", "None" ]
Unsubscribes callback from observable
[ "Unsubscribes", "callback", "from", "observable" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/binding/implementations.py#L37-L42
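Dependency pairs an observable, a key and a callback so the subscription can be torn down later; a lifecycle sketch (the constructor signature and an observe() counterpart to release() are inferred from destroy(), not shown in these records):

from pyviews.binding.implementations import Dependency
from pyviews.core.observable import InheritedDict

def on_name(new_value, old_value):
    print('name ->', new_value)

store = InheritedDict()
store.observe('name', on_name)            # observe() assumed symmetric to release()
dep = Dependency(store, 'name', on_name)  # fields inferred from destroy()'s attributes
store['name'] = 'John'                    # on_name fires
dep.destroy()                             # releases the subscription and clears its fields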
Workiva/furious
example/runner.py
args
def args(): """Add and parse the arguments for the script. url: the url of the example to run gae-sdk-path: this allows a user to point the script to their GAE SDK if it's not in /usr/local/google_appengine. """ parser = argparse.ArgumentParser(description='Run the Furious Examples.') parser.add_argument('--gae-sdk-path', metavar='S', dest="gae_lib_path", default="/usr/local/google_appengine", help='path to the GAE SDK') parser.add_argument('url', metavar='U', default="", nargs=1, help="the endpoint to run") return parser.parse_args()
python
def args(): """Add and parse the arguments for the script. url: the url of the example to run gae-sdk-path: this allows a user to point the script to their GAE SDK if it's not in /usr/local/google_appengine. """ parser = argparse.ArgumentParser(description='Run the Furious Examples.') parser.add_argument('--gae-sdk-path', metavar='S', dest="gae_lib_path", default="/usr/local/google_appengine", help='path to the GAE SDK') parser.add_argument('url', metavar='U', default="", nargs=1, help="the endpoint to run") return parser.parse_args()
[ "def", "args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Run the Furious Examples.'", ")", "parser", ".", "add_argument", "(", "'--gae-sdk-path'", ",", "metavar", "=", "'S'", ",", "dest", "=", "\"gae_lib_path\"", ",", "default", "=", "\"/usr/local/google_appengine\"", ",", "help", "=", "'path to the GAE SDK'", ")", "parser", ".", "add_argument", "(", "'url'", ",", "metavar", "=", "'U'", ",", "default", "=", "\"\"", ",", "nargs", "=", "1", ",", "help", "=", "\"the endpoint to run\"", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Add and parse the arguments for the script. url: the url of the example to run gae-sdk-path: this allows a user to point the script to their GAE SDK if it's not in /usr/local/google_appengine.
[ "Add", "and", "parse", "the", "arguments", "for", "the", "script", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/runner.py#L17-L33
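For example, invoking the runner as `python example/runner.py --gae-sdk-path /opt/google_appengine async_intro` (the SDK path and endpoint name are illustrative) would give:

options = args()
print(options.gae_lib_path)  # '/opt/google_appengine'
print(options.url)           # ['async_intro'] -- nargs=1 always yields a one-element list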
Workiva/furious
example/runner.py
setup
def setup(options): """Grabs the gae_lib_path from the options and inserts it into the first index of the sys.path. Then calls GAE's fix_sys_path to get all the proper GAE paths included. :param options: """ sys.path.insert(0, options.gae_lib_path) from dev_appserver import fix_sys_path fix_sys_path()
python
def setup(options): """Grabs the gae_lib_path from the options and inserts it into the first index of the sys.path. Then calls GAE's fix_sys_path to get all the proper GAE paths included. :param options: """ sys.path.insert(0, options.gae_lib_path) from dev_appserver import fix_sys_path fix_sys_path()
[ "def", "setup", "(", "options", ")", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "options", ".", "gae_lib_path", ")", "from", "dev_appserver", "import", "fix_sys_path", "fix_sys_path", "(", ")" ]
Grabs the gae_lib_path from the options and inserts it into the first index of the sys.path. Then calls GAE's fix_sys_path to get all the proper GAE paths included. :param options:
[ "Grabs", "the", "gae_lib_path", "from", "the", "options", "and", "inserts", "it", "into", "the", "first", "index", "of", "the", "sys", ".", "path", ".", "Then", "calls", "GAE", "s", "fix_sys_path", "to", "get", "all", "the", "proper", "GAE", "paths", "included", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/runner.py#L36-L46
Workiva/furious
example/runner.py
run
def run(options): """Run the passed in url of the example using GAE's rpc runner. Uses appengine_rpc.HttpRpcServer to send a request to the url passed in via the options. :param options: """ from google.appengine.tools import appengine_rpc from google.appengine.tools import appcfg source = 'furious' # use the same user agent that GAE uses in appcfg user_agent = appcfg.GetUserAgent() # Since we're only using the dev server for now we can hard code these # values. This will need to change and accept these values as variables # when this is wired up to hit appspots. server = appengine_rpc.HttpRpcServer( 'localhost:8080', lambda: ('test@example.com', 'password'), user_agent, source, secure=False) # if no url is passed in just use the top level. url = "/" if options.url: url += options.url[0] # use the dev server authentication for now. server._DevAppServerAuthenticate() # send a simple GET request to the url server.Send(url, content_type="text/html; charset=utf-8", payload=None)
python
def run(options): """Run the passed in url of the example using GAE's rpc runner. Uses appengine_rpc.HttpRpcServer to send a request to the url passed in via the options. :param options: """ from google.appengine.tools import appengine_rpc from google.appengine.tools import appcfg source = 'furious' # use the same user agent that GAE uses in appcfg user_agent = appcfg.GetUserAgent() # Since we're only using the dev server for now we can hard code these # values. This will need to change and accept these values as variables # when this is wired up to hit appspots. server = appengine_rpc.HttpRpcServer( 'localhost:8080', lambda: ('test@example.com', 'password'), user_agent, source, secure=False) # if no url is passed in just use the top level. url = "/" if options.url: url += options.url[0] # use the dev server authentication for now. server._DevAppServerAuthenticate() # send a simple GET request to the url server.Send(url, content_type="text/html; charset=utf-8", payload=None)
[ "def", "run", "(", "options", ")", ":", "from", "google", ".", "appengine", ".", "tools", "import", "appengine_rpc", "from", "google", ".", "appengine", ".", "tools", "import", "appcfg", "source", "=", "'furious'", "# use the same user agent that GAE uses in appcfg", "user_agent", "=", "appcfg", ".", "GetUserAgent", "(", ")", "# Since we're only using the dev server for now we can hard code these", "# values. This will need to change and accept these values as variables", "# when this is wired up to hit appspots.", "server", "=", "appengine_rpc", ".", "HttpRpcServer", "(", "'localhost:8080'", ",", "lambda", ":", "(", "'test@example.com'", ",", "'password'", ")", ",", "user_agent", ",", "source", ",", "secure", "=", "False", ")", "# if no url is passed in just use the top level.", "url", "=", "\"/\"", "if", "options", ".", "url", ":", "url", "+=", "options", ".", "url", "[", "0", "]", "# use the dev server authentication for now.", "server", ".", "_DevAppServerAuthenticate", "(", ")", "# send a simple GET request to the url", "server", ".", "Send", "(", "url", ",", "content_type", "=", "\"text/html; charset=utf-8\"", ",", "payload", "=", "None", ")" ]
Run the passed in url of the example using GAE's rpc runner. Uses appengine_rpc.HttpRpcServer to send a request to the url passed in via the options. :param options:
[ "Run", "the", "passed", "in", "url", "of", "the", "example", "using", "GAE", "s", "rpc", "runner", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/runner.py#L49-L82
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/autoload_runner.py
AutoloadRunner.discover
def discover(self): """Enable and disable SNMP community on the device, read its structure and attributes: chassis, modules, submodules, ports, port-channels and power supplies :return: AutoLoadDetails object :rtype: cloudshell.shell.core.driver_context.AutoLoadDetails """ details = self.autoload_flow.execute_flow(self.resource_config.supported_os, self.resource_config.shell_name, self.resource_config.family, self.resource_config.name) self._log_device_details(details) return details
python
def discover(self): """Enable and disable SNMP community on the device, read its structure and attributes: chassis, modules, submodules, ports, port-channels and power supplies :return: AutoLoadDetails object :rtype: cloudshell.shell.core.driver_context.AutoLoadDetails """ details = self.autoload_flow.execute_flow(self.resource_config.supported_os, self.resource_config.shell_name, self.resource_config.family, self.resource_config.name) self._log_device_details(details) return details
[ "def", "discover", "(", "self", ")", ":", "details", "=", "self", ".", "autoload_flow", ".", "execute_flow", "(", "self", ".", "resource_config", ".", "supported_os", ",", "self", ".", "resource_config", ".", "shell_name", ",", "self", ".", "resource_config", ".", "family", ",", "self", ".", "resource_config", ".", "name", ")", "self", ".", "_log_device_details", "(", "details", ")", "return", "details" ]
Enable and disable SNMP community on the device, read its structure and attributes: chassis, modules, submodules, ports, port-channels and power supplies :return: AutoLoadDetails object :rtype: cloudshell.shell.core.driver_context.AutoLoadDetails
[ "Enable", "and", "disable", "SNMP", "community", "on", "the", "device", "read", "its", "structure", "and", "attributes", ":", "chassis", "modules", "submodules", "ports", "port", "-", "channels", "and", "power", "supplies" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/autoload_runner.py#L50-L64
horejsek/python-sqlpuzzle
sqlpuzzle/_queries/query.py
Query.has
def has(self, querypart_name, value=None): """Returns True if `querypart_name` with `value` is set. For example, you can check whether you have already used a condition by `sql.has('where')`. If you want to check for more information, for example whether that condition also contains ID, you can do this by `sql.has('where', 'id')`. """ querypart = self._queryparts.get(querypart_name) if not querypart: return False if not querypart.is_set: return False if value: return querypart.has(value) return True
python
def has(self, querypart_name, value=None): """Returns True if `querypart_name` with `value` is set. For example, you can check whether you have already used a condition by `sql.has('where')`. If you want to check for more information, for example whether that condition also contains ID, you can do this by `sql.has('where', 'id')`. """ querypart = self._queryparts.get(querypart_name) if not querypart: return False if not querypart.is_set: return False if value: return querypart.has(value) return True
[ "def", "has", "(", "self", ",", "querypart_name", ",", "value", "=", "None", ")", ":", "querypart", "=", "self", ".", "_queryparts", ".", "get", "(", "querypart_name", ")", "if", "not", "querypart", ":", "return", "False", "if", "not", "querypart", ".", "is_set", ":", "return", "False", "if", "value", ":", "return", "querypart", ".", "has", "(", "value", ")", "return", "True" ]
Returns True if `querypart_name` with `value` is set. For example, you can check whether you have already used a condition by `sql.has('where')`. If you want to check for more information, for example whether that condition also contains ID, you can do this by `sql.has('where', 'id')`.
[ "Returns", "True", "if", "querypart_name", "with", "value", "is", "set", "." ]
train
https://github.com/horejsek/python-sqlpuzzle/blob/d3a42ed1b339b8eafddb8d2c28a3a5832b3998dd/sqlpuzzle/_queries/query.py#L46-L61
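A usage sketch matching the docstring above (the query-construction calls are assumed from sqlpuzzle's public API; only has() itself appears in this record):

import sqlpuzzle

sql = sqlpuzzle.select_from('users').where(id=42)
assert sql.has('where')          # some condition is set
assert sql.has('where', 'id')    # ...and it mentions the id column
assert not sql.has('limit')      # nothing set for this query part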
eumis/pyviews
pyviews/compilation/expression.py
CompiledExpression.execute
def execute(self, parameters: dict = None): """Executes expression with passed parameters and returns result""" try: parameters = {} if parameters is None else parameters return eval(self._compiled_code, parameters, {}) except: info = exc_info() error = CompilationError('Error occurred in expression execution', self.code) error.add_cause(info[1]) raise error from info[1]
python
def execute(self, parameters: dict = None): """Executes expression with passed parameters and returns result""" try: parameters = {} if parameters is None else parameters return eval(self._compiled_code, parameters, {}) except: info = exc_info() error = CompilationError('Error occurred in expression execution', self.code) error.add_cause(info[1]) raise error from info[1]
[ "def", "execute", "(", "self", ",", "parameters", ":", "dict", "=", "None", ")", ":", "try", ":", "parameters", "=", "{", "}", "if", "parameters", "is", "None", "else", "parameters", "return", "eval", "(", "self", ".", "_compiled_code", ",", "parameters", ",", "{", "}", ")", "except", ":", "info", "=", "exc_info", "(", ")", "error", "=", "CompilationError", "(", "'Error occurred in expression execution'", ",", "self", ".", "code", ")", "error", ".", "add_cause", "(", "info", "[", "1", "]", ")", "raise", "error", "from", "info", "[", "1", "]" ]
Executes expression with passed parameters and returns result
[ "Executes", "expression", "with", "passed", "parameters", "and", "returns", "result" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/compilation/expression.py#L88-L97
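A sketch of running a compiled expression (the constructor taking the source string is an assumption; only execute() is shown in this record):

from pyviews.compilation.expression import CompiledExpression

expr = CompiledExpression('greeting + ", " + name')
assert expr.execute({'greeting': 'Hello', 'name': 'World'}) == 'Hello, World'

# Any failure is re-raised as CompilationError with the original cause attached:
CompiledExpression('1 / 0').execute()  # raises CompilationError from ZeroDivisionError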
Workiva/furious
example/abort_and_restart.py
aborting_function
def aborting_function(): """There is a 50% chance that this function will AbortAndRestart or complete successfully. The 50% chance simply represents a process that will fail half the time and succeed half the time. """ import random logging.info('In aborting_function') if random.random() < .5: from furious.errors import AbortAndRestart logging.info('Getting ready to restart') # Raise AbortAndRestart like an Exception, and watch the magic happen. raise AbortAndRestart() logging.info('No longer restarting')
python
def aborting_function(): """There is a 50% chance that this function will AbortAndRestart or complete successfully. The 50% chance simply represents a process that will fail half the time and succeed half the time. """ import random logging.info('In aborting_function') if random.random() < .5: from furious.errors import AbortAndRestart logging.info('Getting ready to restart') # Raise AbortAndRestart like an Exception, and watch the magic happen. raise AbortAndRestart() logging.info('No longer restarting')
[ "def", "aborting_function", "(", ")", ":", "import", "random", "logging", ".", "info", "(", "'In aborting_function'", ")", "if", "random", ".", "random", "(", ")", "<", ".5", ":", "from", "furious", ".", "errors", "import", "AbortAndRestart", "logging", ".", "info", "(", "'Getting ready to restart'", ")", "# Raise AbortAndRestart like an Exception, and watch the magic happen.", "raise", "AbortAndRestart", "(", ")", "logging", ".", "info", "(", "'No longer restarting'", ")" ]
There is a 50% chance that this function will AbortAndRestart or complete successfully. The 50% chance simply represents a process that will fail half the time and succeed half the time.
[ "There", "is", "a", "50%", "chance", "that", "this", "function", "will", "AbortAndRestart", "or", "complete", "successfully", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/abort_and_restart.py#L41-L60
datacamp/shellwhat
shellwhat/checks/check_funcs.py
strip_ansi
def strip_ansi(state): """Remove ANSI escape codes from student result.""" stu_res = _strip_ansi(state.student_result) return state.to_child(student_result = stu_res)
python
def strip_ansi(state): """Remove ANSI escape codes from student result.""" stu_res = _strip_ansi(state.student_result) return state.to_child(student_result = stu_res)
[ "def", "strip_ansi", "(", "state", ")", ":", "stu_res", "=", "_strip_ansi", "(", "state", ".", "student_result", ")", "return", "state", ".", "to_child", "(", "student_result", "=", "stu_res", ")" ]
Remove ANSI escape codes from student result.
[ "Remove", "ANSI", "escape", "codes", "from", "student", "result", "." ]
train
https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L9-L13
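The _strip_ansi helper itself is not included in these records; a typical implementation strips CSI escape sequences with a regex along these lines (a sketch, not necessarily protowhat's exact pattern):

import re

_ANSI_CSI = re.compile(r'\x1b\[[0-9;]*[A-Za-z]')  # e.g. color codes like '\x1b[31m'

def _strip_ansi(text):
    """Remove ANSI CSI escape sequences from text."""
    return _ANSI_CSI.sub('', text)

assert _strip_ansi('\x1b[31mred\x1b[0m') == 'red'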
datacamp/shellwhat
shellwhat/checks/check_funcs.py
has_code
def has_code(state, text, incorrect_msg="The checker expected to find `{{text}}` in your command.", fixed=False): """Check whether the student code contains text. This function is a simpler override of the `has_code` function in protowhat, because ``ast_node._get_text()`` is not implemented in the OSH parser Using ``has_code()`` should be a last resort. It is always better to look at the result of code or the side effects they had on the state of your program. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student code. fixed: whether to match ``text`` exactly, rather than using regular expressions. :Example: Suppose the solution requires you to do: :: git push origin master The following SCT can be written: :: Ex().has_code(r'git\\s+push\\s+origin\\s+master') Submissions that would pass: :: git push origin master git push origin master Submissions that would fail: :: git push --force origin master """ stu_code = state.student_code # either simple text matching or regex test res = text in stu_code if fixed else re.search(text, stu_code) if not res: _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text }) state.do_test(_msg) return state
python
def has_code(state, text, incorrect_msg="The checker expected to find `{{text}}` in your command.", fixed=False): """Check whether the student code contains text. This function is a simpler override of the `has_code` function in protowhat, because ``ast_node._get_text()`` is not implemented in the OSH parser Using ``has_code()`` should be a last resort. It is always better to look at the result of code or the side effects they had on the state of your program. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student code. fixed: whether to match ``text`` exactly, rather than using regular expressions. :Example: Suppose the solution requires you to do: :: git push origin master The following SCT can be written: :: Ex().has_code(r'git\\s+push\\s+origin\\s+master') Submissions that would pass: :: git push origin master git push origin master Submissions that would fail: :: git push --force origin master """ stu_code = state.student_code # either simple text matching or regex test res = text in stu_code if fixed else re.search(text, stu_code) if not res: _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text }) state.do_test(_msg) return state
[ "def", "has_code", "(", "state", ",", "text", ",", "incorrect_msg", "=", "\"The checker expected to find `{{text}}` in your command.\"", ",", "fixed", "=", "False", ")", ":", "stu_code", "=", "state", ".", "student_code", "# either simple text matching or regex test", "res", "=", "text", "in", "stu_code", "if", "fixed", "else", "re", ".", "search", "(", "text", ",", "stu_code", ")", "if", "not", "res", ":", "_msg", "=", "state", ".", "build_message", "(", "incorrect_msg", ",", "fmt_kwargs", "=", "{", "'text'", ":", "text", "}", ")", "state", ".", "do_test", "(", "_msg", ")", "return", "state" ]
Check whether the student code contains text. This function is a simpler override of the `has_code` function in protowhat, because ``ast_node._get_text()`` is not implemented in the OSH parser Using ``has_code()`` should be a last resort. It is always better to look at the result of code or the side effects they had on the state of your program. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student code. fixed: whether to match ``text`` exactly, rather than using regular expressions. :Example: Suppose the solution requires you to do: :: git push origin master The following SCT can be written: :: Ex().has_code(r'git\\s+push\\s+origin\\s+master') Submissions that would pass: :: git push origin master git push origin master Submissions that would fail: :: git push --force origin master
[ "Check", "whether", "the", "student", "code", "contains", "text", "." ]
train
https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L15-L60
datacamp/shellwhat
shellwhat/checks/check_funcs.py
has_output
def has_output(state, text, incorrect_msg="The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.", fixed=False, strip_ansi=True): """Check whether student output contains specific text. Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``; they might be more fit for your use case. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. text : text that student output must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student output. fixed: whether to match ``text`` exactly, rather than using regular expressions. strip_ansi: whether to remove ANSI escape codes from output :Example: Suppose the solution requires you to do: :: echo 'this is a printout!' The following SCT can be written: :: Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out') Submissions that would pass: :: echo 'this is a print out' test='this is a printout!' && echo $test Submissions that would fail: :: echo 'this is a wrong printout' """ stu_output = state.student_result if strip_ansi: stu_output = _strip_ansi(stu_output) # either simple text matching or regex test res = text in stu_output if fixed else re.search(text, stu_output) if not res: _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text, 'fixed': fixed }) state.do_test(_msg) return state
python
def has_output(state, text, incorrect_msg="The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.", fixed=False, strip_ansi=True): """Check whether student output contains specific text. Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``; they might be more fit for your use case. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. text : text that student output must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student output. fixed: whether to match ``text`` exactly, rather than using regular expressions. strip_ansi: whether to remove ANSI escape codes from output :Example: Suppose the solution requires you to do: :: echo 'this is a printout!' The following SCT can be written: :: Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out') Submissions that would pass: :: echo 'this is a print out' test='this is a printout!' && echo $test Submissions that would fail: :: echo 'this is a wrong printout' """ stu_output = state.student_result if strip_ansi: stu_output = _strip_ansi(stu_output) # either simple text matching or regex test res = text in stu_output if fixed else re.search(text, stu_output) if not res: _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text, 'fixed': fixed }) state.do_test(_msg) return state
[ "def", "has_output", "(", "state", ",", "text", ",", "incorrect_msg", "=", "\"The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.\"", ",", "fixed", "=", "False", ",", "strip_ansi", "=", "True", ")", ":", "stu_output", "=", "state", ".", "student_result", "if", "strip_ansi", ":", "stu_output", "=", "_strip_ansi", "(", "stu_output", ")", "# either simple text matching or regex test", "res", "=", "text", "in", "stu_output", "if", "fixed", "else", "re", ".", "search", "(", "text", ",", "stu_output", ")", "if", "not", "res", ":", "_msg", "=", "state", ".", "build_message", "(", "incorrect_msg", ",", "fmt_kwargs", "=", "{", "'text'", ":", "text", ",", "'fixed'", ":", "fixed", "}", ")", "state", ".", "do_test", "(", "_msg", ")", "return", "state" ]
Check whether student output contains specific text. Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``; they might be more fit for your use case. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. text : text that student output must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student output. fixed: whether to match ``text`` exactly, rather than using regular expressions. strip_ansi: whether to remove ANSI escape codes from output :Example: Suppose the solution requires you to do: :: echo 'this is a printout!' The following SCT can be written: :: Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out') Submissions that would pass: :: echo 'this is a print out' test='this is a printout!' && echo $test Submissions that would fail: :: echo 'this is a wrong printout'
[ "Check", "whether", "student", "output", "contains", "specific", "text", "." ]
train
https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L62-L111
datacamp/shellwhat
shellwhat/checks/check_funcs.py
has_cwd
def has_cwd(state, dir, incorrect_msg="Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there."): """Check whether the student is in the expected directory. This check is typically used before using ``has_expr_output()`` to make sure the student didn't navigate somewhere else. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. dir: Directory that the student should be in. Always use the absolute path. incorrect_msg: If specified, this overrides the automatically generated message in case the student is not in the expected directory. :Example: If you want to be sure that the student is in ``/home/repl/my_dir``: :: Ex().has_cwd('/home/repl/my_dir') """ expr = "[[ $PWD == '{}' ]]".format(dir) _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'dir': dir }) has_expr_exit_code(state, expr, output="0", incorrect_msg=_msg) return state
python
def has_cwd(state, dir, incorrect_msg="Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there."): """Check whether the student is in the expected directory. This check is typically used before using ``has_expr_output()`` to make sure the student didn't navigate somewhere else. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. dir: Directory that the student should be in. Always use the absolute path. incorrect_msg: If specified, this overrides the automatically generated message in case the student is not in the expected directory. :Example: If you want to be sure that the student is in ``/home/repl/my_dir``: :: Ex().has_cwd('/home/repl/my_dir') """ expr = "[[ $PWD == '{}' ]]".format(dir) _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'dir': dir }) has_expr_exit_code(state, expr, output="0", incorrect_msg=_msg) return state
[ "def", "has_cwd", "(", "state", ",", "dir", ",", "incorrect_msg", "=", "\"Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there.\"", ")", ":", "expr", "=", "\"[[ $PWD == '{}' ]]\"", ".", "format", "(", "dir", ")", "_msg", "=", "state", ".", "build_message", "(", "incorrect_msg", ",", "fmt_kwargs", "=", "{", "'dir'", ":", "dir", "}", ")", "has_expr_exit_code", "(", "state", ",", "expr", ",", "output", "=", "\"0\"", ",", "incorrect_msg", "=", "_msg", ")", "return", "state" ]
Check whether the student is in the expected directory. This check is typically used before using ``has_expr_output()`` to make sure the student didn't navigate somewhere else. Args: state: State instance describing student and solution code. Can be omitted if used with ``Ex()``. dir: Directory that the student should be in. Always use the absolute path. incorrect_msg: If specified, this overrides the automatically generated message in case the student is not in the expected directory. :Example: If you want to be sure that the student is in ``/home/repl/my_dir``: :: Ex().has_cwd('/home/repl/my_dir')
[ "Check", "whether", "the", "student", "is", "in", "the", "expected", "directory", "." ]
train
https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L113-L135
internetarchive/doublethink
doublethink/services.py
ServiceRegistry.heartbeat
def heartbeat(self, status_info): ''' Update service status, indicating "up"-ness. Args: status_info (dict): a dictionary representing the status of the service `status_info` must have at least the fields 'role', 'load', and 'ttl'. Some additional fields are populated automatically by this method. If the field 'id' is absent, it will be generated by rethinkdb. See the ServiceRegistry class-level documentation for more information about the various fields. Returns: On success, returns the modified status info dict. On failure communicating with rethinkdb, returns `status_info` unmodified. Raises: Exception: if `status_info` is missing a required field, or a `status_info['ttl']` is not a number greater than zero ''' for field in 'role', 'ttl', 'load': if not field in status_info: raise Exception( 'status_info is missing required field %s', repr(field)) val = status_info['ttl'] if not (isinstance(val, float) or isinstance(val, int)) or val <= 0: raise Exception('ttl must be a number > 0') updated_status_info = dict(status_info) updated_status_info['last_heartbeat'] = r.now() if not 'first_heartbeat' in updated_status_info: updated_status_info['first_heartbeat'] = updated_status_info['last_heartbeat'] if not 'host' in updated_status_info: updated_status_info['host'] = socket.gethostname() if not 'pid' in updated_status_info: updated_status_info['pid'] = os.getpid() try: result = self.rr.table(self.table).insert( updated_status_info, conflict='replace', return_changes=True).run() return result['changes'][0]['new_val'] # XXX check except: self.logger.error('error updating service registry', exc_info=True) return status_info
python
def heartbeat(self, status_info): ''' Update service status, indicating "up"-ness. Args: status_info (dict): a dictionary representing the status of the service `status_info` must have at least the fields 'role', 'load', and 'ttl'. Some additional fields are populated automatically by this method. If the field 'id' is absent, it will be generated by rethinkdb. See the ServiceRegistry class-level documentation for more information about the various fields. Returns: On success, returns the modified status info dict. On failure communicating with rethinkdb, returns `status_info` unmodified. Raises: Exception: if `status_info` is missing a required field, or a `status_info['ttl']` is not a number greater than zero ''' for field in 'role', 'ttl', 'load': if not field in status_info: raise Exception( 'status_info is missing required field %s', repr(field)) val = status_info['ttl'] if not (isinstance(val, float) or isinstance(val, int)) or val <= 0: raise Exception('ttl must be a number > 0') updated_status_info = dict(status_info) updated_status_info['last_heartbeat'] = r.now() if not 'first_heartbeat' in updated_status_info: updated_status_info['first_heartbeat'] = updated_status_info['last_heartbeat'] if not 'host' in updated_status_info: updated_status_info['host'] = socket.gethostname() if not 'pid' in updated_status_info: updated_status_info['pid'] = os.getpid() try: result = self.rr.table(self.table).insert( updated_status_info, conflict='replace', return_changes=True).run() return result['changes'][0]['new_val'] # XXX check except: self.logger.error('error updating service registry', exc_info=True) return status_info
[ "def", "heartbeat", "(", "self", ",", "status_info", ")", ":", "for", "field", "in", "'role'", ",", "'ttl'", ",", "'load'", ":", "if", "not", "field", "in", "status_info", ":", "raise", "Exception", "(", "'status_info is missing required field %s'", ",", "repr", "(", "field", ")", ")", "val", "=", "status_info", "[", "'ttl'", "]", "if", "not", "(", "isinstance", "(", "val", ",", "float", ")", "or", "isinstance", "(", "val", ",", "int", ")", ")", "or", "val", "<=", "0", ":", "raise", "Exception", "(", "'ttl must be a number > 0'", ")", "updated_status_info", "=", "dict", "(", "status_info", ")", "updated_status_info", "[", "'last_heartbeat'", "]", "=", "r", ".", "now", "(", ")", "if", "not", "'first_heartbeat'", "in", "updated_status_info", ":", "updated_status_info", "[", "'first_heartbeat'", "]", "=", "updated_status_info", "[", "'last_heartbeat'", "]", "if", "not", "'host'", "in", "updated_status_info", ":", "updated_status_info", "[", "'host'", "]", "=", "socket", ".", "gethostname", "(", ")", "if", "not", "'pid'", "in", "updated_status_info", ":", "updated_status_info", "[", "'pid'", "]", "=", "os", ".", "getpid", "(", ")", "try", ":", "result", "=", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ")", ".", "insert", "(", "updated_status_info", ",", "conflict", "=", "'replace'", ",", "return_changes", "=", "True", ")", ".", "run", "(", ")", "return", "result", "[", "'changes'", "]", "[", "0", "]", "[", "'new_val'", "]", "# XXX check", "except", ":", "self", ".", "logger", ".", "error", "(", "'error updating service registry'", ",", "exc_info", "=", "True", ")", "return", "status_info" ]
Update service status, indicating "up"-ness. Args: status_info (dict): a dictionary representing the status of the service `status_info` must have at least the fields 'role', 'load', and 'ttl'. Some additional fields are populated automatically by this method. If the field 'id' is absent, it will be generated by rethinkdb. See the ServiceRegistry class-level documentation for more information about the various fields. Returns: On success, returns the modified status info dict. On failure communicating with rethinkdb, returns `status_info` unmodified. Raises: Exception: if `status_info` is missing a required field, or a `status_info['ttl']` is not a number greater than zero
[ "Update", "service", "status", "indicating", "up", "-", "ness", "." ]
train
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/services.py#L115-L161
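A usage sketch for heartbeat() (connection wiring and the ServiceRegistry constructor are assumed; the required fields follow the docstring):

import doublethink

rr = doublethink.Rethinker(['localhost'], db='my_db')  # server list and db name illustrative
registry = doublethink.ServiceRegistry(rr)

status = registry.heartbeat({
    'role': 'crawler',  # required
    'load': 0.25,       # required
    'ttl': 60.0,        # required: seconds of silence before the service counts as down
})
# 'host', 'pid' and the heartbeat timestamps were filled in automatically:
print(status['host'], status['pid'])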
internetarchive/doublethink
doublethink/services.py
ServiceRegistry.unregister
def unregister(self, id): ''' Remove the service with id `id` from the service registry. ''' result = self.rr.table(self.table).get(id).delete().run() if result != { 'deleted':1, 'errors':0,'inserted':0, 'replaced':0,'skipped':0,'unchanged':0}: self.logger.warn( 'unexpected result attempting to delete id=%s from ' 'rethinkdb services table: %s', id, result)
python
def unregister(self, id): ''' Remove the service with id `id` from the service registry. ''' result = self.rr.table(self.table).get(id).delete().run() if result != { 'deleted':1, 'errors':0,'inserted':0, 'replaced':0,'skipped':0,'unchanged':0}: self.logger.warn( 'unexpected result attempting to delete id=%s from ' 'rethinkdb services table: %s', id, result)
[ "def", "unregister", "(", "self", ",", "id", ")", ":", "result", "=", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ")", ".", "get", "(", "id", ")", ".", "delete", "(", ")", ".", "run", "(", ")", "if", "result", "!=", "{", "'deleted'", ":", "1", ",", "'errors'", ":", "0", ",", "'inserted'", ":", "0", ",", "'replaced'", ":", "0", ",", "'skipped'", ":", "0", ",", "'unchanged'", ":", "0", "}", ":", "self", ".", "logger", ".", "warn", "(", "'unexpected result attempting to delete id=%s from '", "'rethinkdb services table: %s'", ",", "id", ",", "result", ")" ]
Remove the service with id `id` from the service registry.
[ "Remove", "the", "service", "with", "id", "id", "from", "the", "service", "registry", "." ]
train
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/services.py#L163-L173
internetarchive/doublethink
doublethink/services.py
ServiceRegistry.unique_service
def unique_service(self, role, candidate=None): ''' Retrieve a unique service, possibly setting or heartbeating it first. A "unique service" is a service with only one instance for a given role. Uniqueness is enforced by using the role name as the primary key `{'id':role, ...}`. Args: role (str): role name candidate (dict): if supplied, candidate info for the unique service, explained below `candidate` normally represents "myself, this instance of the service". When a service supplies `candidate`, it is nominating itself for selection as the unique service, or retaining its claim to the role (heartbeating). If `candidate` is supplied: First, atomically in a single rethinkdb query, checks if there is already a unique healthy instance of this service in rethinkdb, and if not, sets `candidate` as the unique service. Looks at the result of that query to determine if `candidate` is the unique service or not. If it is, updates 'last_heartbeat' in rethinkdb. To determine whether `candidate` is the unique service, checks that all the fields other than 'first_heartbeat' and 'last_heartbeat' have the same value in `candidate` as in the value returned from rethinkdb. ***Important***: this means that the caller must ensure that none of the fields of the unique service ever change. Don't store things like 'load' or any other volatile value in there. If you try to do that, heartbeats will end up not being sent, and the unique service will flap among the candidates. Finally, retrieves the service from rethinkdb and returns it, if it is healthy. Returns: the unique service, if there is one and it is healthy, otherwise None ''' # use the same concept of 'now' for all queries now = doublethink.utcnow() if candidate is not None: candidate['id'] = role if not 'ttl' in candidate: raise Exception("candidate is missing required field 'ttl'") val = candidate['ttl'] if not (isinstance(val, float) or isinstance(val, int)) or val <= 0: raise Exception("'ttl' must be a number > 0") candidate['first_heartbeat'] = now candidate['last_heartbeat'] = now if not 'host' in candidate: candidate['host'] = socket.gethostname() if not 'pid' in candidate: candidate['pid'] = os.getpid() result = self.rr.table( self.table, read_mode='majority').get(role).replace( lambda row: r.branch( r.branch( row, row['last_heartbeat'] > now - row['ttl'], False), row, candidate), return_changes='always').run() new_val = result['changes'][0]['new_val'] if all([new_val.get(k) == candidate[k] for k in candidate if k not in ('first_heartbeat', 'last_heartbeat')]): # candidate is the unique_service, send a heartbeat del candidate['first_heartbeat'] # don't touch first_heartbeat self.rr.table(self.table).get(role).update(candidate).run() results = list(self.rr.table( self.table, read_mode='majority').get_all(role).filter( lambda row: row['last_heartbeat'] > now - row['ttl']).run()) if results: return results[0] else: return None
python
def unique_service(self, role, candidate=None): ''' Retrieve a unique service, possibly setting or heartbeating it first. A "unique service" is a service with only one instance for a given role. Uniqueness is enforced by using the role name as the primary key `{'id':role, ...}`. Args: role (str): role name candidate (dict): if supplied, candidate info for the unique service, explained below `candidate` normally represents "myself, this instance of the service". When a service supplies `candidate`, it is nominating itself for selection as the unique service, or retaining its claim to the role (heartbeating). If `candidate` is supplied: First, atomically in a single rethinkdb query, checks if there is already a unique healthy instance of this service in rethinkdb, and if not, sets `candidate` as the unique service. Looks at the result of that query to determine if `candidate` is the unique service or not. If it is, updates 'last_heartbeat' in rethinkdb. To determine whether `candidate` is the unique service, checks that all the fields other than 'first_heartbeat' and 'last_heartbeat' have the same value in `candidate` as in the value returned from rethinkdb. ***Important***: this means that the caller must ensure that none of the fields of the unique service ever change. Don't store things like 'load' or any other volatile value in there. If you try to do that, heartbeats will end up not being sent, and the unique service will flap among the candidates. Finally, retrieves the service from rethinkdb and returns it, if it is healthy. Returns: the unique service, if there is one and it is healthy, otherwise None ''' # use the same concept of 'now' for all queries now = doublethink.utcnow() if candidate is not None: candidate['id'] = role if not 'ttl' in candidate: raise Exception("candidate is missing required field 'ttl'") val = candidate['ttl'] if not (isinstance(val, float) or isinstance(val, int)) or val <= 0: raise Exception("'ttl' must be a number > 0") candidate['first_heartbeat'] = now candidate['last_heartbeat'] = now if not 'host' in candidate: candidate['host'] = socket.gethostname() if not 'pid' in candidate: candidate['pid'] = os.getpid() result = self.rr.table( self.table, read_mode='majority').get(role).replace( lambda row: r.branch( r.branch( row, row['last_heartbeat'] > now - row['ttl'], False), row, candidate), return_changes='always').run() new_val = result['changes'][0]['new_val'] if all([new_val.get(k) == candidate[k] for k in candidate if k not in ('first_heartbeat', 'last_heartbeat')]): # candidate is the unique_service, send a heartbeat del candidate['first_heartbeat'] # don't touch first_heartbeat self.rr.table(self.table).get(role).update(candidate).run() results = list(self.rr.table( self.table, read_mode='majority').get_all(role).filter( lambda row: row['last_heartbeat'] > now - row['ttl']).run()) if results: return results[0] else: return None
[ "def", "unique_service", "(", "self", ",", "role", ",", "candidate", "=", "None", ")", ":", "# use the same concept of 'now' for all queries", "now", "=", "doublethink", ".", "utcnow", "(", ")", "if", "candidate", "is", "not", "None", ":", "candidate", "[", "'id'", "]", "=", "role", "if", "not", "'ttl'", "in", "candidate", ":", "raise", "Exception", "(", "\"candidate is missing required field 'ttl'\"", ")", "val", "=", "candidate", "[", "'ttl'", "]", "if", "not", "(", "isinstance", "(", "val", ",", "float", ")", "or", "isinstance", "(", "val", ",", "int", ")", ")", "or", "val", "<=", "0", ":", "raise", "Exception", "(", "\"'ttl' must be a number > 0\"", ")", "candidate", "[", "'first_heartbeat'", "]", "=", "now", "candidate", "[", "'last_heartbeat'", "]", "=", "now", "if", "not", "'host'", "in", "candidate", ":", "candidate", "[", "'host'", "]", "=", "socket", ".", "gethostname", "(", ")", "if", "not", "'pid'", "in", "candidate", ":", "candidate", "[", "'pid'", "]", "=", "os", ".", "getpid", "(", ")", "result", "=", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ",", "read_mode", "=", "'majority'", ")", ".", "get", "(", "role", ")", ".", "replace", "(", "lambda", "row", ":", "r", ".", "branch", "(", "r", ".", "branch", "(", "row", ",", "row", "[", "'last_heartbeat'", "]", ">", "now", "-", "row", "[", "'ttl'", "]", ",", "False", ")", ",", "row", ",", "candidate", ")", ",", "return_changes", "=", "'always'", ")", ".", "run", "(", ")", "new_val", "=", "result", "[", "'changes'", "]", "[", "0", "]", "[", "'new_val'", "]", "if", "all", "(", "[", "new_val", ".", "get", "(", "k", ")", "==", "candidate", "[", "k", "]", "for", "k", "in", "candidate", "if", "k", "not", "in", "(", "'first_heartbeat'", ",", "'last_heartbeat'", ")", "]", ")", ":", "# candidate is the unique_service, send a heartbeat", "del", "candidate", "[", "'first_heartbeat'", "]", "# don't touch first_heartbeat", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ")", ".", "get", "(", "role", ")", ".", "update", "(", "candidate", ")", ".", "run", "(", ")", "results", "=", "list", "(", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ",", "read_mode", "=", "'majority'", ")", ".", "get_all", "(", "role", ")", ".", "filter", "(", "lambda", "row", ":", "row", "[", "'last_heartbeat'", "]", ">", "now", "-", "row", "[", "'ttl'", "]", ")", ".", "run", "(", ")", ")", "if", "results", ":", "return", "results", "[", "0", "]", "else", ":", "return", "None" ]
Retrieve a unique service, possibly setting or heartbeating it first. A "unique service" is a service with only one instance for a given role. Uniqueness is enforced by using the role name as the primary key `{'id':role, ...}`. Args: role (str): role name candidate (dict): if supplied, candidate info for the unique service, explained below `candidate` normally represents "myself, this instance of the service". When a service supplies `candidate`, it is nominating itself for selection as the unique service, or retaining its claim to the role (heartbeating). If `candidate` is supplied: First, atomically in a single rethinkdb query, checks if there is already a unique healthy instance of this service in rethinkdb, and if not, sets `candidate` as the unique service. Looks at the result of that query to determine if `candidate` is the unique service or not. If it is, updates 'last_heartbeat' in rethinkdb. To determine whether `candidate` is the unique service, checks that all the fields other than 'first_heartbeat' and 'last_heartbeat' have the same value in `candidate` as in the value returned from rethinkdb. ***Important***: this means that the caller must ensure that none of the fields of the unique service ever change. Don't store things like 'load' or any other volatile value in there. If you try to do that, heartbeats will end up not being sent, and the unique service will flap among the candidates. Finally, retrieves the service from rethinkdb and returns it, if it is healthy. Returns: the unique service, if there is one and it is healthy, otherwise None
[ "Retrieve", "a", "unique", "service", "possibly", "setting", "or", "heartbeating", "it", "first", "." ]
train
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/services.py#L175-L261
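A leader-election sketch built on unique_service(), reusing `registry` from the heartbeat sketch above; per the docstring, the candidate must contain only stable fields:

import os
import socket

candidate = {'ttl': 30.0}   # no volatile fields such as 'load' -- they would break heartbeating
leader = registry.unique_service('scheduler', candidate=candidate)
if leader and leader['host'] == socket.gethostname() and leader['pid'] == os.getpid():
    pass  # this process currently holds the 'scheduler' role; call again periodically to keep it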
internetarchive/doublethink
doublethink/services.py
ServiceRegistry.healthy_services
def healthy_services(self, role=None): ''' Look up healthy services in the registry. A service is considered healthy if its 'last_heartbeat' was less than 'ttl' seconds ago Args: role (str, optional): role name Returns: If `role` is supplied, returns list of healthy services for the given role, otherwise returns list of all healthy services. May return an empty list. ''' try: query = self.rr.table(self.table) if role: query = query.get_all(role, index='role') query = query.filter( lambda svc: r.now().sub(svc["last_heartbeat"]) < svc["ttl"] #.default(20.0) ).order_by("load") result = query.run() return result except r.ReqlNonExistenceError: return []
python
def healthy_services(self, role=None): ''' Look up healthy services in the registry. A service is considered healthy if its 'last_heartbeat' was less than 'ttl' seconds ago Args: role (str, optional): role name Returns: If `role` is supplied, returns list of healthy services for the given role, otherwise returns list of all healthy services. May return an empty list. ''' try: query = self.rr.table(self.table) if role: query = query.get_all(role, index='role') query = query.filter( lambda svc: r.now().sub(svc["last_heartbeat"]) < svc["ttl"] #.default(20.0) ).order_by("load") result = query.run() return result except r.ReqlNonExistenceError: return []
[ "def", "healthy_services", "(", "self", ",", "role", "=", "None", ")", ":", "try", ":", "query", "=", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ")", "if", "role", ":", "query", "=", "query", ".", "get_all", "(", "role", ",", "index", "=", "'role'", ")", "query", "=", "query", ".", "filter", "(", "lambda", "svc", ":", "r", ".", "now", "(", ")", ".", "sub", "(", "svc", "[", "\"last_heartbeat\"", "]", ")", "<", "svc", "[", "\"ttl\"", "]", "#.default(20.0)", ")", ".", "order_by", "(", "\"load\"", ")", "result", "=", "query", ".", "run", "(", ")", "return", "result", "except", "r", ".", "ReqlNonExistenceError", ":", "return", "[", "]" ]
Look up healthy services in the registry. A service is considered healthy if its 'last_heartbeat' was less than 'ttl' seconds ago Args: role (str, optional): role name Returns: If `role` is supplied, returns list of healthy services for the given role, otherwise returns list of all healthy services. May return an empty list.
[ "Look", "up", "healthy", "services", "in", "the", "registry", "." ]
train
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/services.py#L285-L310
Workiva/furious
furious/context/__init__.py
new
def new(batch_size=None, **options): """Get a new furious context and add it to the registry. If a batch size is specified, use an AutoContext which inserts tasks in batches as they are added to the context. """ if batch_size: new_context = AutoContext(batch_size=batch_size, **options) else: new_context = Context(**options) _local.get_local_context().registry.append(new_context) return new_context
python
def new(batch_size=None, **options): """Get a new furious context and add it to the registry. If a batch size is specified, use an AutoContext which inserts tasks in batches as they are added to the context. """ if batch_size: new_context = AutoContext(batch_size=batch_size, **options) else: new_context = Context(**options) _local.get_local_context().registry.append(new_context) return new_context
[ "def", "new", "(", "batch_size", "=", "None", ",", "*", "*", "options", ")", ":", "if", "batch_size", ":", "new_context", "=", "AutoContext", "(", "batch_size", "=", "batch_size", ",", "*", "*", "options", ")", "else", ":", "new_context", "=", "Context", "(", "*", "*", "options", ")", "_local", ".", "get_local_context", "(", ")", ".", "registry", ".", "append", "(", "new_context", ")", "return", "new_context" ]
Get a new furious context and add it to the registry. If a batch size is specified, use an AutoContext which inserts tasks in batches as they are added to the context.
[ "Get", "a", "new", "furious", "context", "and", "add", "it", "to", "the", "registry", ".", "If", "a", "batch", "size", "is", "specified", "use", "an", "AutoContext", "which", "inserts", "tasks", "in", "batches", "as", "they", "are", "added", "to", "the", "context", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/__init__.py#L53-L66
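A usage sketch for both flavors of context; my_task stands in for any importable callable and the batch size of 100 is arbitrary:

from furious import context

def my_task(*args):
    pass  # placeholder for real work

# Plain Context: tasks are inserted when the "with" block exits.
with context.new() as ctx:
    ctx.add(target=my_task, args=(1, 2))

# AutoContext: tasks are inserted in batches of 100 as they are added,
# keeping memory bounded when queuing very large numbers of tasks.
with context.new(batch_size=100) as ctx:
    for i in range(10000):
        ctx.add(target=my_task, args=(i,))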
Workiva/furious
furious/context/__init__.py
get_current_async
def get_current_async():
    """Return a reference to the currently executing Async job object.

    Raises NotInContextError if not in an Async job.
    """
    local_context = _local.get_local_context()

    if local_context._executing_async:
        return local_context._executing_async[-1]

    raise errors.NotInContextError('Not in an _ExecutionContext.')
python
def get_current_async():
    """Return a reference to the currently executing Async job object.

    Raises NotInContextError if not in an Async job.
    """
    local_context = _local.get_local_context()

    if local_context._executing_async:
        return local_context._executing_async[-1]

    raise errors.NotInContextError('Not in an _ExecutionContext.')
[ "def", "get_current_async", "(", ")", ":", "local_context", "=", "_local", ".", "get_local_context", "(", ")", "if", "local_context", ".", "_executing_async", ":", "return", "local_context", ".", "_executing_async", "[", "-", "1", "]", "raise", "errors", ".", "NotInContextError", "(", "'Not in an _ExecutionContext.'", ")" ]
Return a reference to the currently executing Async job object. Raises NotInContextError if not in an Async job.
[ "Return", "a", "reference", "to", "the", "currently", "executing", "Async", "job", "object", ".", "Raises", "NotInContextError", "if", "not", "in", "an", "Async", "job", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/__init__.py#L69-L78
Workiva/furious
furious/context/__init__.py
get_current_context
def get_current_context(): """Return a reference to the current Context object. """ local_context = _local.get_local_context() if local_context.registry: return local_context.registry[-1] raise errors.NotInContextError('Not in a Context.')
python
def get_current_context(): """Return a reference to the current Context object. """ local_context = _local.get_local_context() if local_context.registry: return local_context.registry[-1] raise errors.NotInContextError('Not in a Context.')
[ "def", "get_current_context", "(", ")", ":", "local_context", "=", "_local", ".", "get_local_context", "(", ")", "if", "local_context", ".", "registry", ":", "return", "local_context", ".", "registry", "[", "-", "1", "]", "raise", "errors", ".", "NotInContextError", "(", "'Not in a Context.'", ")" ]
Return a reference to the current Context object.
[ "Return", "a", "reference", "to", "the", "current", "Context", "object", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/__init__.py#L81-L89
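Both accessors raise NotInContextError rather than returning a sentinel, so callers that may run outside a job or context should catch it. A sketch of where each is typically used:

from furious import context

def my_task():
    # From inside an executing Async, grab a handle on the job itself.
    job = context.get_current_async()
    return job.id

with context.new() as ctx:
    # While a context is open, registry[-1] is that context.
    assert context.get_current_context() is ctx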
Workiva/furious
furious/extras/xsrf.py
XSRFToken.generate_token_string
def generate_token_string(self, action=None): """Generate a hash of the given token contents that can be verified. :param action: A string representing the action that the generated hash is valid for. This string is usually a URL. :returns: A string containing the hash contents of the given `action` and the contents of the `XSRFToken`. Can be verified with `verify_token_string`. The string is base64 encoded so it is safe to use in HTML forms without escaping. """ digest_maker = self._digest_maker() digest_maker.update(self.user_id) digest_maker.update(self._DELIMITER) if action: digest_maker.update(action) digest_maker.update(self._DELIMITER) digest_maker.update(str(self.current_time)) return base64.urlsafe_b64encode( self._DELIMITER.join([digest_maker.hexdigest(), str(self.current_time)]))
python
def generate_token_string(self, action=None): """Generate a hash of the given token contents that can be verified. :param action: A string representing the action that the generated hash is valid for. This string is usually a URL. :returns: A string containing the hash contents of the given `action` and the contents of the `XSRFToken`. Can be verified with `verify_token_string`. The string is base64 encoded so it is safe to use in HTML forms without escaping. """ digest_maker = self._digest_maker() digest_maker.update(self.user_id) digest_maker.update(self._DELIMITER) if action: digest_maker.update(action) digest_maker.update(self._DELIMITER) digest_maker.update(str(self.current_time)) return base64.urlsafe_b64encode( self._DELIMITER.join([digest_maker.hexdigest(), str(self.current_time)]))
[ "def", "generate_token_string", "(", "self", ",", "action", "=", "None", ")", ":", "digest_maker", "=", "self", ".", "_digest_maker", "(", ")", "digest_maker", ".", "update", "(", "self", ".", "user_id", ")", "digest_maker", ".", "update", "(", "self", ".", "_DELIMITER", ")", "if", "action", ":", "digest_maker", ".", "update", "(", "action", ")", "digest_maker", ".", "update", "(", "self", ".", "_DELIMITER", ")", "digest_maker", ".", "update", "(", "str", "(", "self", ".", "current_time", ")", ")", "return", "base64", ".", "urlsafe_b64encode", "(", "self", ".", "_DELIMITER", ".", "join", "(", "[", "digest_maker", ".", "hexdigest", "(", ")", ",", "str", "(", "self", ".", "current_time", ")", "]", ")", ")" ]
Generate a hash of the given token contents that can be verified. :param action: A string representing the action that the generated hash is valid for. This string is usually a URL. :returns: A string containing the hash contents of the given `action` and the contents of the `XSRFToken`. Can be verified with `verify_token_string`. The string is base64 encoded so it is safe to use in HTML forms without escaping.
[ "Generate", "a", "hash", "of", "the", "given", "token", "contents", "that", "can", "be", "verified", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/xsrf.py#L57-L79
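A usage sketch; the user id, secret, and action URL are placeholders, and the secret must be a server-side value that clients never see:

from furious.extras.xsrf import XSRFToken

token = XSRFToken('user@example.com', 'server-side-secret')
token_string = token.generate_token_string(action='/posts/delete')
# Embed token_string in the form; it is urlsafe base64, so no HTML
# escaping is needed.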
Workiva/furious
furious/extras/xsrf.py
XSRFToken.verify_token_string
def verify_token_string(self,
                        token_string,
                        action=None,
                        timeout=None,
                        current_time=None):
    """Verify a token string generated by `generate_token_string`.
    :param token_string: A string containing the hashed token (generated by
        `generate_token_string`).
    :param action: A string containing the action that is being verified.
    :param timeout: An int or float representing the number of seconds
        that the token is valid for. If None then tokens are valid forever.
    :param current_time: An int representing the number of seconds since the
        epoch. Will be used to check for token expiry if `timeout` is set.
        If `None` then the current time will be used.
    :raises: XSRFTokenMalformed if the given token_string cannot be parsed.
        XSRFTokenExpiredException if the given token string is expired.
        XSRFTokenInvalid if the given token string does not match the
        contents of the `XSRFToken`.
    """
    try:
        decoded_token_string = base64.urlsafe_b64decode(token_string)
    except TypeError:
        raise XSRFTokenMalformed()

    split_token = decoded_token_string.split(self._DELIMITER)
    if len(split_token) != 2:
        raise XSRFTokenMalformed()

    try:
        token_time = int(split_token[1])
    except ValueError:
        raise XSRFTokenMalformed()

    if timeout is not None:
        if current_time is None:
            current_time = time.time()
        # If an attacker modifies the plain text time then it will not match
        # the hashed time so this check is sufficient.
        if (token_time + timeout) < current_time:
            raise XSRFTokenExpiredException()

    expected_token = XSRFToken(self.user_id, self.secret, token_time)
    expected_token_string = expected_token.generate_token_string(action)

    if len(expected_token_string) != len(token_string):
        raise XSRFTokenInvalid()

    # Compare the two strings in constant time to prevent timing attacks.
    different = 0
    for a, b in zip(token_string, expected_token_string):
        different |= ord(a) ^ ord(b)
    if different:
        raise XSRFTokenInvalid()
python
def verify_token_string(self,
                        token_string,
                        action=None,
                        timeout=None,
                        current_time=None):
    """Verify a token string generated by `generate_token_string`.
    :param token_string: A string containing the hashed token (generated by
        `generate_token_string`).
    :param action: A string containing the action that is being verified.
    :param timeout: An int or float representing the number of seconds
        that the token is valid for. If None then tokens are valid forever.
    :param current_time: An int representing the number of seconds since the
        epoch. Will be used to check for token expiry if `timeout` is set.
        If `None` then the current time will be used.
    :raises: XSRFTokenMalformed if the given token_string cannot be parsed.
        XSRFTokenExpiredException if the given token string is expired.
        XSRFTokenInvalid if the given token string does not match the
        contents of the `XSRFToken`.
    """
    try:
        decoded_token_string = base64.urlsafe_b64decode(token_string)
    except TypeError:
        raise XSRFTokenMalformed()

    split_token = decoded_token_string.split(self._DELIMITER)
    if len(split_token) != 2:
        raise XSRFTokenMalformed()

    try:
        token_time = int(split_token[1])
    except ValueError:
        raise XSRFTokenMalformed()

    if timeout is not None:
        if current_time is None:
            current_time = time.time()
        # If an attacker modifies the plain text time then it will not match
        # the hashed time so this check is sufficient.
        if (token_time + timeout) < current_time:
            raise XSRFTokenExpiredException()

    expected_token = XSRFToken(self.user_id, self.secret, token_time)
    expected_token_string = expected_token.generate_token_string(action)

    if len(expected_token_string) != len(token_string):
        raise XSRFTokenInvalid()

    # Compare the two strings in constant time to prevent timing attacks.
    different = 0
    for a, b in zip(token_string, expected_token_string):
        different |= ord(a) ^ ord(b)
    if different:
        raise XSRFTokenInvalid()
[ "def", "verify_token_string", "(", "self", ",", "token_string", ",", "action", "=", "None", ",", "timeout", "=", "None", ",", "current_time", "=", "None", ")", ":", "try", ":", "decoded_token_string", "=", "base64", ".", "urlsafe_b64decode", "(", "token_string", ")", "except", "TypeError", ":", "raise", "XSRFTokenMalformed", "(", ")", "split_token", "=", "decoded_token_string", ".", "split", "(", "self", ".", "_DELIMITER", ")", "if", "len", "(", "split_token", ")", "!=", "2", ":", "raise", "XSRFTokenMalformed", "(", ")", "try", ":", "token_time", "=", "int", "(", "split_token", "[", "1", "]", ")", "except", "ValueError", ":", "raise", "XSRFTokenMalformed", "(", ")", "if", "timeout", "is", "not", "None", ":", "if", "current_time", "is", "None", ":", "current_time", "=", "time", ".", "time", "(", ")", "# If an attacker modifies the plain text time then it will not match", "# the hashed time so this check is sufficient.", "if", "(", "token_time", "+", "timeout", ")", "<", "current_time", ":", "raise", "XSRFTokenExpiredException", "(", ")", "expected_token", "=", "XSRFToken", "(", "self", ".", "user_id", ",", "self", ".", "secret", ",", "token_time", ")", "expected_token_string", "=", "expected_token", ".", "generate_token_string", "(", "action", ")", "if", "len", "(", "expected_token_string", ")", "!=", "len", "(", "token_string", ")", ":", "raise", "XSRFTokenInvalid", "(", ")", "# Compare the two strings in constant time to prevent timing attacks.", "different", "=", "0", "for", "a", ",", "b", "in", "zip", "(", "token_string", ",", "expected_token_string", ")", ":", "different", "|=", "ord", "(", "a", ")", "^", "ord", "(", "b", ")", "if", "different", ":", "raise", "XSRFTokenInvalid", "(", ")" ]
Verify a token string generated by `generate_token_string`.

:param token_string: A string containing the hashed token (generated by
    `generate_token_string`).
:param action: A string containing the action that is being verified.
:param timeout: An int or float representing the number of seconds
    that the token is valid for. If None then tokens are valid forever.
:param current_time: An int representing the number of seconds since the
    epoch. Will be used to check for token expiry if `timeout` is set.
    If `None` then the current time will be used.
:raises: XSRFTokenMalformed if the given token_string cannot be parsed.
    XSRFTokenExpiredException if the given token string is expired.
    XSRFTokenInvalid if the given token string does not match the
    contents of the `XSRFToken`.
[ "Verify", "a", "token", "string", "generated", "by", "generate_token_string", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/xsrf.py#L81-L139
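The matching verification step, assuming the three exception classes are importable from the same module as XSRFToken; submitted_string is whatever came back in the request, and the one-hour timeout is illustrative. Note that the manual XOR loop above is a constant-time comparison, the same property hmac.compare_digest provides on modern Pythons.

from furious.extras.xsrf import (
    XSRFToken, XSRFTokenExpiredException, XSRFTokenInvalid,
    XSRFTokenMalformed)

token = XSRFToken('user@example.com', 'server-side-secret')
try:
    token.verify_token_string(
        submitted_string, action='/posts/delete', timeout=3600)
except XSRFTokenExpiredException:
    pass  # token older than an hour: issue a fresh form
except (XSRFTokenMalformed, XSRFTokenInvalid):
    pass  # tampered or forged token: reject the request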
tommyjcarpenter/dictsearch
dictsearch/search.py
iterate_dictionary
def iterate_dictionary(d, path, squash_single = False): """ Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS. The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function iterate_dictionary(d_to_search, "A/B/C/D") If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary. Rules =========================== Each node can be either 1) an arbitrary non-list, non-dictionary object 2) a dictionary 3) a list of arbitrary objects All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search. At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise. Returns =========================== 1) If the path is an empty string, returns the original dict 2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`. *Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected. 3) None in the case that there are no objects at the search path. """ path_parts = path.split("/") return_list = [] if len(path_parts) == 0: #no search string return d else: try: sub_dicts = [d] #BFS, start with root node for i in range(0, len(path_parts)): #BFS new_sub_dicts = [] for s in sub_dicts: if path_parts[i] in s: #this tree node is part of the search path the_list = s[path_parts[i]] if isinstance(s[path_parts[i]], list) else [s[path_parts[i]]] for j in the_list: if i < len(path_parts) -1: #not a leaf node; check level if isinstance(j, dict): #skip this non-leaf node if not a dict new_sub_dicts.append(j) #BFS expansion else: #leaf node at the desired path; add to final return list return_list.append(j) sub_dicts = new_sub_dicts #return return return_list[0] if squash_single and len(return_list) == 1 else return_list if len(return_list) >= 1 else None except: return None
python
def iterate_dictionary(d, path, squash_single = False): """ Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS. The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function iterate_dictionary(d_to_search, "A/B/C/D") If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary. Rules =========================== Each node can be either 1) an arbitrary non-list, non-dictionary object 2) a dictionary 3) a list of arbitrary objects All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search. At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise. Returns =========================== 1) If the path is an empty string, returns the original dict 2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`. *Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected. 3) None in the case that there are no objects at the search path. """ path_parts = path.split("/") return_list = [] if len(path_parts) == 0: #no search string return d else: try: sub_dicts = [d] #BFS, start with root node for i in range(0, len(path_parts)): #BFS new_sub_dicts = [] for s in sub_dicts: if path_parts[i] in s: #this tree node is part of the search path the_list = s[path_parts[i]] if isinstance(s[path_parts[i]], list) else [s[path_parts[i]]] for j in the_list: if i < len(path_parts) -1: #not a leaf node; check level if isinstance(j, dict): #skip this non-leaf node if not a dict new_sub_dicts.append(j) #BFS expansion else: #leaf node at the desired path; add to final return list return_list.append(j) sub_dicts = new_sub_dicts #return return return_list[0] if squash_single and len(return_list) == 1 else return_list if len(return_list) >= 1 else None except: return None
[ "def", "iterate_dictionary", "(", "d", ",", "path", ",", "squash_single", "=", "False", ")", ":", "path_parts", "=", "path", ".", "split", "(", "\"/\"", ")", "return_list", "=", "[", "]", "if", "len", "(", "path_parts", ")", "==", "0", ":", "#no search string", "return", "d", "else", ":", "try", ":", "sub_dicts", "=", "[", "d", "]", "#BFS, start with root node", "for", "i", "in", "range", "(", "0", ",", "len", "(", "path_parts", ")", ")", ":", "#BFS", "new_sub_dicts", "=", "[", "]", "for", "s", "in", "sub_dicts", ":", "if", "path_parts", "[", "i", "]", "in", "s", ":", "#this tree node is part of the search path", "the_list", "=", "s", "[", "path_parts", "[", "i", "]", "]", "if", "isinstance", "(", "s", "[", "path_parts", "[", "i", "]", "]", ",", "list", ")", "else", "[", "s", "[", "path_parts", "[", "i", "]", "]", "]", "for", "j", "in", "the_list", ":", "if", "i", "<", "len", "(", "path_parts", ")", "-", "1", ":", "#not a leaf node; check level", "if", "isinstance", "(", "j", ",", "dict", ")", ":", "#skip this non-leaf node if not a dict", "new_sub_dicts", ".", "append", "(", "j", ")", "#BFS expansion", "else", ":", "#leaf node at the desired path; add to final return list", "return_list", ".", "append", "(", "j", ")", "sub_dicts", "=", "new_sub_dicts", "#return ", "return", "return_list", "[", "0", "]", "if", "squash_single", "and", "len", "(", "return_list", ")", "==", "1", "else", "return_list", "if", "len", "(", "return_list", ")", ">=", "1", "else", "None", "except", ":", "return", "None" ]
Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS. The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function iterate_dictionary(d_to_search, "A/B/C/D") If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary. Rules =========================== Each node can be either 1) an arbitrary non-list, non-dictionary object 2) a dictionary 3) a list of arbitrary objects All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search. At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise. Returns =========================== 1) If the path is an empty string, returns the original dict 2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`. *Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected. 3) None in the case that there are no objects at the search path.
[ "Takes", "a", "dict", "and", "a", "path", "delimited", "with", "slashes", "like", "A", "/", "B", "/", "C", "/", "D", "and", "returns", "a", "list", "of", "objects", "found", "at", "all", "leaf", "nodes", "at", "all", "trajectories", "dict", "[", "A", "]", "[", "B", "]", "[", "C", "]", "[", "D", "]", ".", "It", "does", "this", "using", "BFS", "not", "DFS", ".", "The", "word", "leaf", "hereby", "refers", "to", "an", "item", "at", "the", "search", "path", "level", ".", "That", "is", "upon", "calling", "the", "function", "iterate_dictionary", "(", "d_to_search", "A", "/", "B", "/", "C", "/", "D", ")", "If", "d_to_search", "has", "five", "levels", "A", "/", "B", "/", "C", "/", "D", "/", "E", "then", "D", "is", "the", "leaf", "node", "level", ".", "Since", "[", "E", "]", "exists", "then", "at", "least", "one", "object", "in", "the", "return", "list", "will", "be", "a", "dictionary", ".", "Rules", "===========================", "Each", "node", "can", "be", "either", "1", ")", "an", "arbitrary", "non", "-", "list", "non", "-", "dictionary", "object", "2", ")", "a", "dictionary", "3", ")", "a", "list", "of", "arbitrary", "objects", "All", "nodes", "of", "type", "3", "at", "each", "level", "are", "searched", "for", "nodes", "of", "type", "1", "and", "2", ".", "Nodes", "of", "type", "2", "are", "the", "ones", "iterated", "in", "this", "tree", "search", ".", "At", "the", "current", "time", "nodes", "of", "type", "1", "are", "*", "not", "*", "inspected", ".", "They", "are", "returned", "in", "a", "list", "if", "they", "are", "at", "the", "search", "path", "and", "ignored", "otherwise", ".", "Returns", "===========================", "1", ")", "If", "the", "path", "is", "an", "empty", "string", "returns", "the", "original", "dict", "2", ")", "*", "If", "*", "at", "least", "one", "object", "exists", "at", "the", "search", "path", "it", "returns", "a", "list", "of", "all", "items", "at", "the", "search", "path", ".", "Using", "the", "above", "example", "terminology", "a", "list", "of", "all", "objects", "at", "all", "trajectories", "A", "/", "B", "/", "C", "/", "D", ".", "*", "Special", "Parameter", "*", ":", "If", "the", "optional", "Boolean", "parameter", "squash_single", "is", "True", "and", "the", "return", "list", "contains", "only", "one", "object", "the", "object", "is", "returned", "(", "*", "not", "*", "a", "list", ")", "else", "a", "list", "with", "that", "one", "object", "is", "returned", ".", "This", "optional", "flag", "is", "useful", "so", "that", "[", "0", "]", "does", "not", "have", "to", "be", "indexed", "on", "the", "return", "list", "in", "the", "case", "where", "only", "one", "item", "is", "expected", ".", "3", ")", "None", "in", "the", "case", "that", "there", "are", "no", "objects", "at", "the", "search", "path", "." ]
train
https://github.com/tommyjcarpenter/dictsearch/blob/a14ca489b6bdd5b636099381add0cc6b94ec665d/dictsearch/search.py#L1-L56
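Concrete calls against a small nested structure. One behavioral wrinkle worth knowing: ''.split('/') yields [''], not [], so the documented empty-path case never actually triggers and an empty path returns None in practice.

from dictsearch.search import iterate_dictionary

d = {'A': [{'B': {'C': {'D': 1}}}, {'B': {'C': {'D': 2}}}]}

iterate_dictionary(d, 'A/B/C/D')  # [1, 2] -- both trajectories reached
iterate_dictionary(d, 'A/B/C')    # [{'D': 1}, {'D': 2}] -- leaves may be dicts
iterate_dictionary(d, 'A/X')      # None -- nothing at the search path

# squash_single unwraps a single-item result list.
iterate_dictionary({'A': {'B': 7}}, 'A/B', squash_single=True)  # 7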
QualiSystems/cloudshell-networking-devices
cloudshell/devices/snmp_handler.py
SnmpHandler.get_snmp_service
def get_snmp_service(self):
    """Return the SNMP service wrapped in a context manager.

    The context manager runs the enable flow on enter and the disable
    flow on exit, using the stored SNMP parameters.

    :rtype: SnmpContextManager
    """
    return SnmpContextManager(self.enable_flow, self.disable_flow, self._snmp_parameters, self._logger)
python
def get_snmp_service(self):
    """Return the SNMP service wrapped in a context manager.

    The context manager runs the enable flow on enter and the disable
    flow on exit, using the stored SNMP parameters.

    :rtype: SnmpContextManager
    """
    return SnmpContextManager(self.enable_flow, self.disable_flow, self._snmp_parameters, self._logger)
[ "def", "get_snmp_service", "(", "self", ")", ":", "return", "SnmpContextManager", "(", "self", ".", "enable_flow", ",", "self", ".", "disable_flow", ",", "self", ".", "_snmp_parameters", ",", "self", ".", "_logger", ")" ]
Return the SNMP service wrapped in a context manager.

The context manager runs the enable flow on enter and the disable
flow on exit, using the stored SNMP parameters.

:rtype: SnmpContextManager
[ "Return", "the", "SNMP", "service", "wrapped", "in", "a", "context", "manager", ".", "The", "context", "manager", "runs", "the", "enable", "flow", "on", "enter", "and", "the", "disable", "flow", "on", "exit", ",", "using", "the", "stored", "SNMP", "parameters", ".", ":", "rtype", ":", "SnmpContextManager" ]
train
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/snmp_handler.py#L85-L92
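Intended use is as a context manager, so SNMP stays enabled for the duration of the block; a hedged sketch in which the snmp_handler instance and the call made on the yielded service are illustrative:

with snmp_handler.get_snmp_service() as snmp_service:
    # The enable flow has run; issue SNMP reads here.
    sys_descr = snmp_service.get_property('SNMPv2-MIB', 'sysDescr', '0')
# Leaving the block triggers the disable flow.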
internetarchive/doublethink
doublethink/cli.py
purge_stale_services
def purge_stale_services(argv=None): """Command-line utility to periodically purge stale entries from the "services" table. It is designed to be used in conjunction with cron. """ argv = argv or sys.argv arg_parser = argparse.ArgumentParser( prog=os.path.basename(argv[0]), description=( 'doublethink-purge-stale-services: utility to periodically ' 'purge stale entries from the "services" table.')) arg_parser.add_argument( "-d", "--rethinkdb-db", required=True, dest="database", help="A RethinkDB database containing a 'services' table") arg_parser.add_argument("-s", "--rethinkdb-servers", metavar="SERVERS", dest="servers", default='localhost', help="rethinkdb servers, e.g. db0.foo.org,db0.foo.org:38015,db1.foo.org") arg_parser.add_argument( '-v', '--verbose', dest='log_level', action='store_const', default=logging.INFO, const=logging.DEBUG, help=( 'verbose logging')) args = arg_parser.parse_args(argv[1:]) logging.basicConfig( stream=sys.stdout, level=args.log_level, format=( '%(asctime)s %(process)d %(levelname)s %(threadName)s ' '%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s')) args.servers = [srv.strip() for srv in args.servers.split(",")] rethinker = doublethink.Rethinker(servers=args.servers, db=args.database) registry = doublethink.services.ServiceRegistry(rethinker) registry.purge_stale_services() return 0
python
def purge_stale_services(argv=None): """Command-line utility to periodically purge stale entries from the "services" table. It is designed to be used in conjunction with cron. """ argv = argv or sys.argv arg_parser = argparse.ArgumentParser( prog=os.path.basename(argv[0]), description=( 'doublethink-purge-stale-services: utility to periodically ' 'purge stale entries from the "services" table.')) arg_parser.add_argument( "-d", "--rethinkdb-db", required=True, dest="database", help="A RethinkDB database containing a 'services' table") arg_parser.add_argument("-s", "--rethinkdb-servers", metavar="SERVERS", dest="servers", default='localhost', help="rethinkdb servers, e.g. db0.foo.org,db0.foo.org:38015,db1.foo.org") arg_parser.add_argument( '-v', '--verbose', dest='log_level', action='store_const', default=logging.INFO, const=logging.DEBUG, help=( 'verbose logging')) args = arg_parser.parse_args(argv[1:]) logging.basicConfig( stream=sys.stdout, level=args.log_level, format=( '%(asctime)s %(process)d %(levelname)s %(threadName)s ' '%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s')) args.servers = [srv.strip() for srv in args.servers.split(",")] rethinker = doublethink.Rethinker(servers=args.servers, db=args.database) registry = doublethink.services.ServiceRegistry(rethinker) registry.purge_stale_services() return 0
[ "def", "purge_stale_services", "(", "argv", "=", "None", ")", ":", "argv", "=", "argv", "or", "sys", ".", "argv", "arg_parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "os", ".", "path", ".", "basename", "(", "argv", "[", "0", "]", ")", ",", "description", "=", "(", "'doublethink-purge-stale-services: utility to periodically '", "'purge stale entries from the \"services\" table.'", ")", ")", "arg_parser", ".", "add_argument", "(", "\"-d\"", ",", "\"--rethinkdb-db\"", ",", "required", "=", "True", ",", "dest", "=", "\"database\"", ",", "help", "=", "\"A RethinkDB database containing a 'services' table\"", ")", "arg_parser", ".", "add_argument", "(", "\"-s\"", ",", "\"--rethinkdb-servers\"", ",", "metavar", "=", "\"SERVERS\"", ",", "dest", "=", "\"servers\"", ",", "default", "=", "'localhost'", ",", "help", "=", "\"rethinkdb servers, e.g. db0.foo.org,db0.foo.org:38015,db1.foo.org\"", ")", "arg_parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "dest", "=", "'log_level'", ",", "action", "=", "'store_const'", ",", "default", "=", "logging", ".", "INFO", ",", "const", "=", "logging", ".", "DEBUG", ",", "help", "=", "(", "'verbose logging'", ")", ")", "args", "=", "arg_parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "logging", ".", "basicConfig", "(", "stream", "=", "sys", ".", "stdout", ",", "level", "=", "args", ".", "log_level", ",", "format", "=", "(", "'%(asctime)s %(process)d %(levelname)s %(threadName)s '", "'%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s'", ")", ")", "args", ".", "servers", "=", "[", "srv", ".", "strip", "(", ")", "for", "srv", "in", "args", ".", "servers", ".", "split", "(", "\",\"", ")", "]", "rethinker", "=", "doublethink", ".", "Rethinker", "(", "servers", "=", "args", ".", "servers", ",", "db", "=", "args", ".", "database", ")", "registry", "=", "doublethink", ".", "services", ".", "ServiceRegistry", "(", "rethinker", ")", "registry", ".", "purge_stale_services", "(", ")", "return", "0" ]
Command-line utility to periodically purge stale entries from the "services" table. It is designed to be used in conjunction with cron.
[ "Command", "-", "line", "utility", "to", "periodically", "purge", "stale", "entries", "from", "the", "services", "table", "." ]
train
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/cli.py#L25-L59
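Because argv is injectable, the entry point can be exercised directly from Python as well as from cron; this call is equivalent to the console invocation the prog string suggests, with illustrative database and server values:

from doublethink.cli import purge_stale_services

# Same as: doublethink-purge-stale-services -d my_db -s db0.foo.org,db1.foo.org
purge_stale_services(['doublethink-purge-stale-services',
                      '-d', 'my_db',
                      '-s', 'db0.foo.org,db1.foo.org'])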
Workiva/furious
furious/context/context.py
_insert_tasks
def _insert_tasks(tasks, queue, transactional=False, retry_transient_errors=True, retry_delay=RETRY_SLEEP_SECS): """Insert a batch of tasks into the specified queue. If an error occurs during insertion, split the batch and retry until they are successfully inserted. Return the number of successfully inserted tasks. """ from google.appengine.api import taskqueue if not tasks: return 0 try: taskqueue.Queue(name=queue).add(tasks, transactional=transactional) return len(tasks) except (taskqueue.BadTaskStateError, taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError): if len(tasks) <= 1: # Task has already been inserted, no reason to report an error here. return 0 # If a list of more than one Tasks is given, a raised exception does # not guarantee that no tasks were added to the queue (unless # transactional is set to True). To determine which tasks were # successfully added when an exception is raised, check the # Task.was_enqueued property. reinsert = _tasks_to_reinsert(tasks, transactional) count = len(reinsert) inserted = len(tasks) - count inserted += _insert_tasks(reinsert[:count / 2], queue, transactional, retry_transient_errors, retry_delay) inserted += _insert_tasks(reinsert[count / 2:], queue, transactional, retry_transient_errors, retry_delay) return inserted except taskqueue.TransientError: # Always re-raise for transactional insert, or if specified by # options. if transactional or not retry_transient_errors: raise reinsert = _tasks_to_reinsert(tasks, transactional) # Retry with a delay, and then let any errors re-raise. time.sleep(retry_delay) taskqueue.Queue(name=queue).add(reinsert, transactional=transactional) return len(tasks)
python
def _insert_tasks(tasks, queue, transactional=False, retry_transient_errors=True, retry_delay=RETRY_SLEEP_SECS): """Insert a batch of tasks into the specified queue. If an error occurs during insertion, split the batch and retry until they are successfully inserted. Return the number of successfully inserted tasks. """ from google.appengine.api import taskqueue if not tasks: return 0 try: taskqueue.Queue(name=queue).add(tasks, transactional=transactional) return len(tasks) except (taskqueue.BadTaskStateError, taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError): if len(tasks) <= 1: # Task has already been inserted, no reason to report an error here. return 0 # If a list of more than one Tasks is given, a raised exception does # not guarantee that no tasks were added to the queue (unless # transactional is set to True). To determine which tasks were # successfully added when an exception is raised, check the # Task.was_enqueued property. reinsert = _tasks_to_reinsert(tasks, transactional) count = len(reinsert) inserted = len(tasks) - count inserted += _insert_tasks(reinsert[:count / 2], queue, transactional, retry_transient_errors, retry_delay) inserted += _insert_tasks(reinsert[count / 2:], queue, transactional, retry_transient_errors, retry_delay) return inserted except taskqueue.TransientError: # Always re-raise for transactional insert, or if specified by # options. if transactional or not retry_transient_errors: raise reinsert = _tasks_to_reinsert(tasks, transactional) # Retry with a delay, and then let any errors re-raise. time.sleep(retry_delay) taskqueue.Queue(name=queue).add(reinsert, transactional=transactional) return len(tasks)
[ "def", "_insert_tasks", "(", "tasks", ",", "queue", ",", "transactional", "=", "False", ",", "retry_transient_errors", "=", "True", ",", "retry_delay", "=", "RETRY_SLEEP_SECS", ")", ":", "from", "google", ".", "appengine", ".", "api", "import", "taskqueue", "if", "not", "tasks", ":", "return", "0", "try", ":", "taskqueue", ".", "Queue", "(", "name", "=", "queue", ")", ".", "add", "(", "tasks", ",", "transactional", "=", "transactional", ")", "return", "len", "(", "tasks", ")", "except", "(", "taskqueue", ".", "BadTaskStateError", ",", "taskqueue", ".", "TaskAlreadyExistsError", ",", "taskqueue", ".", "TombstonedTaskError", ")", ":", "if", "len", "(", "tasks", ")", "<=", "1", ":", "# Task has already been inserted, no reason to report an error here.", "return", "0", "# If a list of more than one Tasks is given, a raised exception does", "# not guarantee that no tasks were added to the queue (unless", "# transactional is set to True). To determine which tasks were", "# successfully added when an exception is raised, check the", "# Task.was_enqueued property.", "reinsert", "=", "_tasks_to_reinsert", "(", "tasks", ",", "transactional", ")", "count", "=", "len", "(", "reinsert", ")", "inserted", "=", "len", "(", "tasks", ")", "-", "count", "inserted", "+=", "_insert_tasks", "(", "reinsert", "[", ":", "count", "/", "2", "]", ",", "queue", ",", "transactional", ",", "retry_transient_errors", ",", "retry_delay", ")", "inserted", "+=", "_insert_tasks", "(", "reinsert", "[", "count", "/", "2", ":", "]", ",", "queue", ",", "transactional", ",", "retry_transient_errors", ",", "retry_delay", ")", "return", "inserted", "except", "taskqueue", ".", "TransientError", ":", "# Always re-raise for transactional insert, or if specified by", "# options.", "if", "transactional", "or", "not", "retry_transient_errors", ":", "raise", "reinsert", "=", "_tasks_to_reinsert", "(", "tasks", ",", "transactional", ")", "# Retry with a delay, and then let any errors re-raise.", "time", ".", "sleep", "(", "retry_delay", ")", "taskqueue", ".", "Queue", "(", "name", "=", "queue", ")", ".", "add", "(", "reinsert", ",", "transactional", "=", "transactional", ")", "return", "len", "(", "tasks", ")" ]
Insert a batch of tasks into the specified queue. If an error occurs during insertion, split the batch and retry until they are successfully inserted. Return the number of successfully inserted tasks.
[ "Insert", "a", "batch", "of", "tasks", "into", "the", "specified", "queue", ".", "If", "an", "error", "occurs", "during", "insertion", "split", "the", "batch", "and", "retry", "until", "they", "are", "successfully", "inserted", ".", "Return", "the", "number", "of", "successfully", "inserted", "tasks", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L368-L416
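The recovery strategy here is binary splitting: on a permanent-looking insert error, drop the tasks that were already enqueued, halve the remainder, and recurse until singletons either succeed or are skipped. A library-free distillation of that idea:

def split_retry(items, insert_fn):
    """Insert items in one call; on failure, halve the batch and recurse.

    Returns the number of items successfully inserted. A failing
    singleton is skipped, mirroring the len(tasks) <= 1 case above.
    """
    if not items:
        return 0
    try:
        insert_fn(items)
        return len(items)
    except Exception:
        if len(items) <= 1:
            return 0
        mid = len(items) // 2
        return (split_retry(items[:mid], insert_fn)
                + split_retry(items[mid:], insert_fn))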
Workiva/furious
furious/context/context.py
_tasks_to_reinsert
def _tasks_to_reinsert(tasks, transactional): """Return a list containing the tasks that should be reinserted based on the was_enqueued property and whether the insert is transactional or not. """ if transactional: return tasks return [task for task in tasks if not task.was_enqueued]
python
def _tasks_to_reinsert(tasks, transactional): """Return a list containing the tasks that should be reinserted based on the was_enqueued property and whether the insert is transactional or not. """ if transactional: return tasks return [task for task in tasks if not task.was_enqueued]
[ "def", "_tasks_to_reinsert", "(", "tasks", ",", "transactional", ")", ":", "if", "transactional", ":", "return", "tasks", "return", "[", "task", "for", "task", "in", "tasks", "if", "not", "task", ".", "was_enqueued", "]" ]
Return a list containing the tasks that should be reinserted based on the was_enqueued property and whether the insert is transactional or not.
[ "Return", "a", "list", "containing", "the", "tasks", "that", "should", "be", "reinserted", "based", "on", "the", "was_enqueued", "property", "and", "whether", "the", "insert", "is", "transactional", "or", "not", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L419-L426
Workiva/furious
furious/context/context.py
_task_batcher
def _task_batcher(tasks, batch_size=None): """Batches large task lists into groups of 100 so that they can all be inserted. """ from itertools import izip_longest if not batch_size: batch_size = DEFAULT_TASK_BATCH_SIZE # Ensure the batch size is under the task api limit. batch_size = min(batch_size, 100) args = [iter(tasks)] * batch_size return ([task for task in group if task] for group in izip_longest(*args))
python
def _task_batcher(tasks, batch_size=None): """Batches large task lists into groups of 100 so that they can all be inserted. """ from itertools import izip_longest if not batch_size: batch_size = DEFAULT_TASK_BATCH_SIZE # Ensure the batch size is under the task api limit. batch_size = min(batch_size, 100) args = [iter(tasks)] * batch_size return ([task for task in group if task] for group in izip_longest(*args))
[ "def", "_task_batcher", "(", "tasks", ",", "batch_size", "=", "None", ")", ":", "from", "itertools", "import", "izip_longest", "if", "not", "batch_size", ":", "batch_size", "=", "DEFAULT_TASK_BATCH_SIZE", "# Ensure the batch size is under the task api limit.", "batch_size", "=", "min", "(", "batch_size", ",", "100", ")", "args", "=", "[", "iter", "(", "tasks", ")", "]", "*", "batch_size", "return", "(", "[", "task", "for", "task", "in", "group", "if", "task", "]", "for", "group", "in", "izip_longest", "(", "*", "args", ")", ")" ]
Batches large task lists into groups of 100 so that they can all be inserted.
[ "Batches", "large", "task", "lists", "into", "groups", "of", "100", "so", "that", "they", "can", "all", "be", "inserted", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L429-L442
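The repeated-iterator trick is the classic itertools "grouper" idiom: batch_size references to one shared iterator, zipped against themselves, with the filter stripping the fill values that pad the final group. A Python 3 rendering (izip_longest became itertools.zip_longest):

from itertools import zip_longest

def batcher(items, batch_size=100):
    args = [iter(items)] * batch_size
    return ([x for x in group if x is not None]
            for group in zip_longest(*args))

print(list(batcher(range(1, 8), 3)))  # [[1, 2, 3], [4, 5, 6], [7]]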
Workiva/furious
furious/context/context.py
Context._handle_tasks_insert
def _handle_tasks_insert(self, batch_size=None): """Convert all Async's into tasks, then insert them into queues.""" if self._tasks_inserted: raise errors.ContextAlreadyStartedError( "This Context has already had its tasks inserted.") task_map = self._get_tasks_by_queue() # QUESTION: Should the persist happen before or after the task # insertion? I feel like this is something that will alter the # behavior of the tasks themselves by adding a callback (check context # complete) to each Async's callback stack. # If we are able to and there is a reason to persist... persist. callbacks = self._options.get('callbacks') if self._persistence_engine and callbacks: self.persist() retry_transient = self._options.get('retry_transient_errors', True) retry_delay = self._options.get('retry_delay', RETRY_SLEEP_SECS) for queue, tasks in task_map.iteritems(): for batch in _task_batcher(tasks, batch_size=batch_size): inserted = self._insert_tasks( batch, queue=queue, retry_transient_errors=retry_transient, retry_delay=retry_delay ) if isinstance(inserted, (int, long)): # Don't blow up on insert_tasks that don't return counts. self._insert_success_count += inserted self._insert_failed_count += len(batch) - inserted
python
def _handle_tasks_insert(self, batch_size=None): """Convert all Async's into tasks, then insert them into queues.""" if self._tasks_inserted: raise errors.ContextAlreadyStartedError( "This Context has already had its tasks inserted.") task_map = self._get_tasks_by_queue() # QUESTION: Should the persist happen before or after the task # insertion? I feel like this is something that will alter the # behavior of the tasks themselves by adding a callback (check context # complete) to each Async's callback stack. # If we are able to and there is a reason to persist... persist. callbacks = self._options.get('callbacks') if self._persistence_engine and callbacks: self.persist() retry_transient = self._options.get('retry_transient_errors', True) retry_delay = self._options.get('retry_delay', RETRY_SLEEP_SECS) for queue, tasks in task_map.iteritems(): for batch in _task_batcher(tasks, batch_size=batch_size): inserted = self._insert_tasks( batch, queue=queue, retry_transient_errors=retry_transient, retry_delay=retry_delay ) if isinstance(inserted, (int, long)): # Don't blow up on insert_tasks that don't return counts. self._insert_success_count += inserted self._insert_failed_count += len(batch) - inserted
[ "def", "_handle_tasks_insert", "(", "self", ",", "batch_size", "=", "None", ")", ":", "if", "self", ".", "_tasks_inserted", ":", "raise", "errors", ".", "ContextAlreadyStartedError", "(", "\"This Context has already had its tasks inserted.\"", ")", "task_map", "=", "self", ".", "_get_tasks_by_queue", "(", ")", "# QUESTION: Should the persist happen before or after the task", "# insertion? I feel like this is something that will alter the", "# behavior of the tasks themselves by adding a callback (check context", "# complete) to each Async's callback stack.", "# If we are able to and there is a reason to persist... persist.", "callbacks", "=", "self", ".", "_options", ".", "get", "(", "'callbacks'", ")", "if", "self", ".", "_persistence_engine", "and", "callbacks", ":", "self", ".", "persist", "(", ")", "retry_transient", "=", "self", ".", "_options", ".", "get", "(", "'retry_transient_errors'", ",", "True", ")", "retry_delay", "=", "self", ".", "_options", ".", "get", "(", "'retry_delay'", ",", "RETRY_SLEEP_SECS", ")", "for", "queue", ",", "tasks", "in", "task_map", ".", "iteritems", "(", ")", ":", "for", "batch", "in", "_task_batcher", "(", "tasks", ",", "batch_size", "=", "batch_size", ")", ":", "inserted", "=", "self", ".", "_insert_tasks", "(", "batch", ",", "queue", "=", "queue", ",", "retry_transient_errors", "=", "retry_transient", ",", "retry_delay", "=", "retry_delay", ")", "if", "isinstance", "(", "inserted", ",", "(", "int", ",", "long", ")", ")", ":", "# Don't blow up on insert_tasks that don't return counts.", "self", ".", "_insert_success_count", "+=", "inserted", "self", ".", "_insert_failed_count", "+=", "len", "(", "batch", ")", "-", "inserted" ]
Convert all Async's into tasks, then insert them into queues.
[ "Convert", "all", "Async", "s", "into", "tasks", "then", "insert", "them", "into", "queues", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L127-L157
Workiva/furious
furious/context/context.py
Context._get_tasks_by_queue
def _get_tasks_by_queue(self): """Return the tasks for this Context, grouped by queue.""" task_map = {} _checker = None # Ask the persistence engine for an Async to use for checking if the # context is complete. if self._persistence_engine: _checker = self._persistence_engine.context_completion_checker for async in self._tasks: queue = async.get_queue() if _checker: async.update_options(_context_checker=_checker) task = async.to_task() task_map.setdefault(queue, []).append(task) return task_map
python
def _get_tasks_by_queue(self): """Return the tasks for this Context, grouped by queue.""" task_map = {} _checker = None # Ask the persistence engine for an Async to use for checking if the # context is complete. if self._persistence_engine: _checker = self._persistence_engine.context_completion_checker for async in self._tasks: queue = async.get_queue() if _checker: async.update_options(_context_checker=_checker) task = async.to_task() task_map.setdefault(queue, []).append(task) return task_map
[ "def", "_get_tasks_by_queue", "(", "self", ")", ":", "task_map", "=", "{", "}", "_checker", "=", "None", "# Ask the persistence engine for an Async to use for checking if the", "# context is complete.", "if", "self", ".", "_persistence_engine", ":", "_checker", "=", "self", ".", "_persistence_engine", ".", "context_completion_checker", "for", "async", "in", "self", ".", "_tasks", ":", "queue", "=", "async", ".", "get_queue", "(", ")", "if", "_checker", ":", "async", ".", "update_options", "(", "_context_checker", "=", "_checker", ")", "task", "=", "async", ".", "to_task", "(", ")", "task_map", ".", "setdefault", "(", "queue", ",", "[", "]", ")", ".", "append", "(", "task", ")", "return", "task_map" ]
Return the tasks for this Context, grouped by queue.
[ "Return", "the", "tasks", "for", "this", "Context", "grouped", "by", "queue", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L167-L185
Workiva/furious
furious/context/context.py
Context.set_event_handler
def set_event_handler(self, event, handler): """Add an Async to be run on event.""" # QUESTION: Should we raise an exception if `event` is not in some # known event-type list? self._prepare_persistence_engine() callbacks = self._options.get('callbacks', {}) callbacks[event] = handler self._options['callbacks'] = callbacks
python
def set_event_handler(self, event, handler): """Add an Async to be run on event.""" # QUESTION: Should we raise an exception if `event` is not in some # known event-type list? self._prepare_persistence_engine() callbacks = self._options.get('callbacks', {}) callbacks[event] = handler self._options['callbacks'] = callbacks
[ "def", "set_event_handler", "(", "self", ",", "event", ",", "handler", ")", ":", "# QUESTION: Should we raise an exception if `event` is not in some", "# known event-type list?", "self", ".", "_prepare_persistence_engine", "(", ")", "callbacks", "=", "self", ".", "_options", ".", "get", "(", "'callbacks'", ",", "{", "}", ")", "callbacks", "[", "event", "]", "=", "handler", "self", ".", "_options", "[", "'callbacks'", "]", "=", "callbacks" ]
Add an Async to be run on event.
[ "Add", "an", "Async", "to", "be", "run", "on", "event", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L204-L213
Workiva/furious
furious/context/context.py
Context.exec_event_handler
def exec_event_handler(self, event, transactional=False): """Execute the Async set to be run on event.""" # QUESTION: Should we raise an exception if `event` is not in some # known event-type list? callbacks = self._options.get('callbacks', {}) handler = callbacks.get(event) if not handler: raise Exception('Handler not defined!!!') handler.start(transactional=transactional)
python
def exec_event_handler(self, event, transactional=False): """Execute the Async set to be run on event.""" # QUESTION: Should we raise an exception if `event` is not in some # known event-type list? callbacks = self._options.get('callbacks', {}) handler = callbacks.get(event) if not handler: raise Exception('Handler not defined!!!') handler.start(transactional=transactional)
[ "def", "exec_event_handler", "(", "self", ",", "event", ",", "transactional", "=", "False", ")", ":", "# QUESTION: Should we raise an exception if `event` is not in some", "# known event-type list?", "callbacks", "=", "self", ".", "_options", ".", "get", "(", "'callbacks'", ",", "{", "}", ")", "handler", "=", "callbacks", ".", "get", "(", "event", ")", "if", "not", "handler", ":", "raise", "Exception", "(", "'Handler not defined!!!'", ")", "handler", ".", "start", "(", "transactional", "=", "transactional", ")" ]
Execute the Async set to be run on event.
[ "Execute", "the", "Async", "set", "to", "be", "run", "on", "event", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L215-L227
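set_event_handler stores an Async under a free-form event name and exec_event_handler later starts it. A sketch in which the 'complete' event name, the on_complete and my_task targets, and the persistence engine are all illustrative:

from furious import context
from furious.async import Async

def on_complete():
    pass  # placeholder completion work

with context.new(persistence_engine=my_engine) as ctx:
    ctx.set_event_handler('complete', Async(target=on_complete))
    ctx.add(target=my_task)

# Later, e.g. from a completion checker, fire the stored handler:
ctx.exec_event_handler('complete')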
Workiva/furious
furious/context/context.py
Context.add
def add(self, target, args=None, kwargs=None, **options): """Add an Async job to this context. Takes an Async object or the arguments to construct an Async object as arguments. Returns the newly added Async object. """ from furious.async import Async from furious.batcher import Message if self._tasks_inserted: raise errors.ContextAlreadyStartedError( "This Context has already had its tasks inserted.") if not isinstance(target, (Async, Message)): target = Async(target, args, kwargs, **options) target.update_options(_context_id=self.id) if self.persist_async_results: target.update_options(persist_result=True) self._tasks.append(target) self._options['_task_ids'].append(target.id) return target
python
def add(self, target, args=None, kwargs=None, **options): """Add an Async job to this context. Takes an Async object or the arguments to construct an Async object as arguments. Returns the newly added Async object. """ from furious.async import Async from furious.batcher import Message if self._tasks_inserted: raise errors.ContextAlreadyStartedError( "This Context has already had its tasks inserted.") if not isinstance(target, (Async, Message)): target = Async(target, args, kwargs, **options) target.update_options(_context_id=self.id) if self.persist_async_results: target.update_options(persist_result=True) self._tasks.append(target) self._options['_task_ids'].append(target.id) return target
[ "def", "add", "(", "self", ",", "target", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "*", "*", "options", ")", ":", "from", "furious", ".", "async", "import", "Async", "from", "furious", ".", "batcher", "import", "Message", "if", "self", ".", "_tasks_inserted", ":", "raise", "errors", ".", "ContextAlreadyStartedError", "(", "\"This Context has already had its tasks inserted.\"", ")", "if", "not", "isinstance", "(", "target", ",", "(", "Async", ",", "Message", ")", ")", ":", "target", "=", "Async", "(", "target", ",", "args", ",", "kwargs", ",", "*", "*", "options", ")", "target", ".", "update_options", "(", "_context_id", "=", "self", ".", "id", ")", "if", "self", ".", "persist_async_results", ":", "target", ".", "update_options", "(", "persist_result", "=", "True", ")", "self", ".", "_tasks", ".", "append", "(", "target", ")", "self", ".", "_options", "[", "'_task_ids'", "]", ".", "append", "(", "target", ".", "id", ")", "return", "target" ]
Add an Async job to this context. Takes an Async object or the arguments to construct an Async object as arguments. Returns the newly added Async object.
[ "Add", "an", "Async", "job", "to", "this", "context", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L229-L253
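add accepts either a prebuilt Async (or batcher Message) or the raw pieces from which to construct one; both calls below queue equivalent work, and each returns the Async that was added:

from furious import context
from furious.async import Async

def my_task(n, verbose=False):
    pass  # placeholder for real work

with context.new() as ctx:
    ctx.add(Async(target=my_task, args=(1,)))                    # prebuilt
    job = ctx.add(my_task, args=(2,), kwargs={'verbose': True})  # built for you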
Workiva/furious
furious/context/context.py
Context.load
def load(cls, context_id, persistence_engine=None): """Load and instantiate a Context from the persistence_engine.""" if not persistence_engine: from furious.config import get_default_persistence_engine persistence_engine = get_default_persistence_engine() if not persistence_engine: raise RuntimeError( 'Specify a valid persistence_engine to load the context.') return persistence_engine.load_context(context_id)
python
def load(cls, context_id, persistence_engine=None): """Load and instantiate a Context from the persistence_engine.""" if not persistence_engine: from furious.config import get_default_persistence_engine persistence_engine = get_default_persistence_engine() if not persistence_engine: raise RuntimeError( 'Specify a valid persistence_engine to load the context.') return persistence_engine.load_context(context_id)
[ "def", "load", "(", "cls", ",", "context_id", ",", "persistence_engine", "=", "None", ")", ":", "if", "not", "persistence_engine", ":", "from", "furious", ".", "config", "import", "get_default_persistence_engine", "persistence_engine", "=", "get_default_persistence_engine", "(", ")", "if", "not", "persistence_engine", ":", "raise", "RuntimeError", "(", "'Specify a valid persistence_engine to load the context.'", ")", "return", "persistence_engine", ".", "load_context", "(", "context_id", ")" ]
Load and instantiate a Context from the persistence_engine.
[ "Load", "and", "instantiate", "a", "Context", "from", "the", "persistence_engine", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L269-L279
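Reconstituting a context elsewhere, for example inside a completion-check task; context_id is whatever id the original context reported, and the explicit-engine form is only needed when no default engine is configured:

from furious.context.context import Context

ctx = Context.load(context_id)             # default persistence engine
ctx = Context.load(context_id, my_engine)  # or an explicit engine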
Workiva/furious
furious/context/context.py
Context.to_dict
def to_dict(self): """Return this Context as a dict suitable for json encoding.""" import copy options = copy.deepcopy(self._options) if self._insert_tasks: options['insert_tasks'] = reference_to_path(self._insert_tasks) if self._persistence_engine: options['persistence_engine'] = reference_to_path( self._persistence_engine) options.update({ '_tasks_inserted': self._tasks_inserted, }) callbacks = self._options.get('callbacks') if callbacks: options['callbacks'] = encode_callbacks(callbacks) return options
python
def to_dict(self): """Return this Context as a dict suitable for json encoding.""" import copy options = copy.deepcopy(self._options) if self._insert_tasks: options['insert_tasks'] = reference_to_path(self._insert_tasks) if self._persistence_engine: options['persistence_engine'] = reference_to_path( self._persistence_engine) options.update({ '_tasks_inserted': self._tasks_inserted, }) callbacks = self._options.get('callbacks') if callbacks: options['callbacks'] = encode_callbacks(callbacks) return options
[ "def", "to_dict", "(", "self", ")", ":", "import", "copy", "options", "=", "copy", ".", "deepcopy", "(", "self", ".", "_options", ")", "if", "self", ".", "_insert_tasks", ":", "options", "[", "'insert_tasks'", "]", "=", "reference_to_path", "(", "self", ".", "_insert_tasks", ")", "if", "self", ".", "_persistence_engine", ":", "options", "[", "'persistence_engine'", "]", "=", "reference_to_path", "(", "self", ".", "_persistence_engine", ")", "options", ".", "update", "(", "{", "'_tasks_inserted'", ":", "self", ".", "_tasks_inserted", ",", "}", ")", "callbacks", "=", "self", ".", "_options", ".", "get", "(", "'callbacks'", ")", "if", "callbacks", ":", "options", "[", "'callbacks'", "]", "=", "encode_callbacks", "(", "callbacks", ")", "return", "options" ]
Return this Context as a dict suitable for json encoding.
[ "Return", "this", "Context", "as", "a", "dict", "suitable", "for", "json", "encoding", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L281-L302
Workiva/furious
furious/context/context.py
Context.from_dict
def from_dict(cls, context_options_dict): """Return a context job from a dict output by Context.to_dict.""" import copy context_options = copy.deepcopy(context_options_dict) tasks_inserted = context_options.pop('_tasks_inserted', False) insert_tasks = context_options.pop('insert_tasks', None) if insert_tasks: context_options['insert_tasks'] = path_to_reference(insert_tasks) # The constructor expects a reference to the persistence engine. persistence_engine = context_options.pop('persistence_engine', None) if persistence_engine: context_options['persistence_engine'] = path_to_reference( persistence_engine) # If there are callbacks, reconstitute them. callbacks = context_options.pop('callbacks', None) if callbacks: context_options['callbacks'] = decode_callbacks(callbacks) context = cls(**context_options) context._tasks_inserted = tasks_inserted return context
python
def from_dict(cls, context_options_dict): """Return a context job from a dict output by Context.to_dict.""" import copy context_options = copy.deepcopy(context_options_dict) tasks_inserted = context_options.pop('_tasks_inserted', False) insert_tasks = context_options.pop('insert_tasks', None) if insert_tasks: context_options['insert_tasks'] = path_to_reference(insert_tasks) # The constructor expects a reference to the persistence engine. persistence_engine = context_options.pop('persistence_engine', None) if persistence_engine: context_options['persistence_engine'] = path_to_reference( persistence_engine) # If there are callbacks, reconstitute them. callbacks = context_options.pop('callbacks', None) if callbacks: context_options['callbacks'] = decode_callbacks(callbacks) context = cls(**context_options) context._tasks_inserted = tasks_inserted return context
[ "def", "from_dict", "(", "cls", ",", "context_options_dict", ")", ":", "import", "copy", "context_options", "=", "copy", ".", "deepcopy", "(", "context_options_dict", ")", "tasks_inserted", "=", "context_options", ".", "pop", "(", "'_tasks_inserted'", ",", "False", ")", "insert_tasks", "=", "context_options", ".", "pop", "(", "'insert_tasks'", ",", "None", ")", "if", "insert_tasks", ":", "context_options", "[", "'insert_tasks'", "]", "=", "path_to_reference", "(", "insert_tasks", ")", "# The constructor expects a reference to the persistence engine.", "persistence_engine", "=", "context_options", ".", "pop", "(", "'persistence_engine'", ",", "None", ")", "if", "persistence_engine", ":", "context_options", "[", "'persistence_engine'", "]", "=", "path_to_reference", "(", "persistence_engine", ")", "# If there are callbacks, reconstitute them.", "callbacks", "=", "context_options", ".", "pop", "(", "'callbacks'", ",", "None", ")", "if", "callbacks", ":", "context_options", "[", "'callbacks'", "]", "=", "decode_callbacks", "(", "callbacks", ")", "context", "=", "cls", "(", "*", "*", "context_options", ")", "context", ".", "_tasks_inserted", "=", "tasks_inserted", "return", "context" ]
Return a context job from a dict output by Context.to_dict.
[ "Return", "a", "context", "job", "from", "a", "dict", "output", "by", "Context", ".", "to_dict", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L305-L332
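A quick round trip of the two methods above: callables are flattened to dotted-path strings for JSON and re-imported on load. The helpers below are minimal stand-ins for furious.job_utils, written for illustration, not the library's real implementations:

import importlib

def reference_to_path(func):
    # Flatten a callable to a 'module.attribute' string for JSON encoding.
    return '%s.%s' % (func.__module__, func.__name__)

def path_to_reference(path):
    # Re-import a callable from its dotted path.
    module_name, attr = path.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), attr)

path = reference_to_path(len)          # 'builtins.len' on Python 3
assert path_to_reference(path) is len  # the round trip recovers the callable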
Workiva/furious
furious/context/context.py
Context.result
def result(self): """Return the context result object pulled from the persistence_engine if it has been set. """ if not self._result: if not self._persistence_engine: return None self._result = self._persistence_engine.get_context_result(self) return self._result
python
def result(self): """Return the context result object pulled from the persistence_engine if it has been set. """ if not self._result: if not self._persistence_engine: return None self._result = self._persistence_engine.get_context_result(self) return self._result
[ "def", "result", "(", "self", ")", ":", "if", "not", "self", ".", "_result", ":", "if", "not", "self", ".", "_persistence_engine", ":", "return", "None", "self", ".", "_result", "=", "self", ".", "_persistence_engine", ".", "get_context_result", "(", "self", ")", "return", "self", ".", "_result" ]
Return the context result object pulled from the persistence_engine if it has been set.
[ "Return", "the", "context", "result", "object", "pulled", "from", "the", "persistence_engine", "if", "it", "has", "been", "set", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L335-L345
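The result property above is a lazy load: fetch once from the persistence engine, then serve the cached value. A generic sketch of the same caching pattern (Report and _fetch are hypothetical names, not furious APIs):

class Report(object):
    def __init__(self):
        self._result = None

    def _fetch(self):
        # Stand-in for the expensive call (get_context_result above).
        return {'status': 'done'}

    @property
    def result(self):
        if self._result is None:
            self._result = self._fetch()  # computed at most once
        return self._result

report = Report()
assert report.result is report.result  # second access is served from the cache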
Workiva/furious
furious/extras/insert_task_handlers.py
insert_tasks_ignore_duplicate_names
def insert_tasks_ignore_duplicate_names(tasks, queue, *args, **kwargs): """Insert a batch of tasks into a specific queue. If a DuplicateTaskNameError is raised, loop through the tasks and insert the remaining, ignoring and logging the duplicate tasks. Returns the number of successfully inserted tasks. """ from google.appengine.api import taskqueue try: inserted = _insert_tasks(tasks, queue, *args, **kwargs) return inserted except taskqueue.DuplicateTaskNameError: # At least one task failed in our batch, attempt to re-insert the # remaining tasks. Named tasks can never be transactional. reinsert = _tasks_to_reinsert(tasks, transactional=False) count = len(reinsert) inserted = len(tasks) - count # Our subsequent task inserts should raise TaskAlreadyExistsError at # least once, but that will be swallowed by _insert_tasks. for task in reinsert: inserted += _insert_tasks([task], queue, *args, **kwargs) return inserted
python
def insert_tasks_ignore_duplicate_names(tasks, queue, *args, **kwargs): """Insert a batch of tasks into a specific queue. If a DuplicateTaskNameError is raised, loop through the tasks and insert the remaining, ignoring and logging the duplicate tasks. Returns the number of successfully inserted tasks. """ from google.appengine.api import taskqueue try: inserted = _insert_tasks(tasks, queue, *args, **kwargs) return inserted except taskqueue.DuplicateTaskNameError: # At least one task failed in our batch, attempt to re-insert the # remaining tasks. Named tasks can never be transactional. reinsert = _tasks_to_reinsert(tasks, transactional=False) count = len(reinsert) inserted = len(tasks) - count # Our subsequent task inserts should raise TaskAlreadyExistsError at # least once, but that will be swallowed by _insert_tasks. for task in reinsert: inserted += _insert_tasks([task], queue, *args, **kwargs) return inserted
[ "def", "insert_tasks_ignore_duplicate_names", "(", "tasks", ",", "queue", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "google", ".", "appengine", ".", "api", "import", "taskqueue", "try", ":", "inserted", "=", "_insert_tasks", "(", "tasks", ",", "queue", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inserted", "except", "taskqueue", ".", "DuplicateTaskNameError", ":", "# At least one task failed in our batch, attempt to re-insert the", "# remaining tasks. Named tasks can never be transactional.", "reinsert", "=", "_tasks_to_reinsert", "(", "tasks", ",", "transactional", "=", "False", ")", "count", "=", "len", "(", "reinsert", ")", "inserted", "=", "len", "(", "tasks", ")", "-", "count", "# Our subsequent task inserts should raise TaskAlreadyExistsError at", "# least once, but that will be swallowed by _insert_tasks.", "for", "task", "in", "reinsert", ":", "inserted", "+=", "_insert_tasks", "(", "[", "task", "]", ",", "queue", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inserted" ]
Insert a batch of tasks into a specific queue. If a DuplicateTaskNameError is raised, loop through the tasks and insert the remaining, ignoring and logging the duplicate tasks. Returns the number of successfully inserted tasks.
[ "Insert", "a", "batch", "of", "tasks", "into", "a", "specific", "queue", ".", "If", "a", "DuplicateTaskNameError", "is", "raised", "loop", "through", "the", "tasks", "and", "insert", "the", "remaining", "ignoring", "and", "logging", "the", "duplicate", "tasks", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/insert_task_handlers.py#L5-L32
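The handler above retries a failed batch one task at a time so a single duplicate name cannot sink the whole insert. The same shape works outside App Engine; this sketch uses a plain set as the queue and a hypothetical DuplicateError in place of taskqueue's exception:

class DuplicateError(Exception):
    pass

_queue = set()

def _insert_batch(names):
    # All-or-nothing batch insert that rejects duplicates.
    if any(name in _queue for name in names):
        raise DuplicateError(names)
    _queue.update(names)
    return len(names)

def insert_ignore_duplicates(names):
    try:
        return _insert_batch(names)
    except DuplicateError:
        # Fall back to one-by-one inserts, skipping the duplicates.
        inserted = 0
        for name in names:
            if name not in _queue:
                inserted += _insert_batch([name])
        return inserted

_queue.add('b')
assert insert_ignore_duplicates(['a', 'b', 'c']) == 2  # 'b' was already queued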
Workiva/furious
example/grep.py
log_results
def log_results():
    """This is the callback that is run once the Async task is finished. It
    takes the output from grep and logs it."""
    from furious.context import get_current_async

    # Get the recently finished Async object. ('async' is a reserved word
    # in Python 3.7+, so the local is named finished_async.)
    finished_async = get_current_async()

    # Pull out the result data and log it.
    for result in finished_async.result:
        logging.info(result)
python
def log_results():
    """This is the callback that is run once the Async task is finished. It
    takes the output from grep and logs it."""
    from furious.context import get_current_async

    # Get the recently finished Async object. ('async' is a reserved word
    # in Python 3.7+, so the local is named finished_async.)
    finished_async = get_current_async()

    # Pull out the result data and log it.
    for result in finished_async.result:
        logging.info(result)
[ "def", "log_results", "(", ")", ":", "from", "furious", ".", "context", "import", "get_current_async", "# Get the recently finished Async object. ('async' is a reserved word", "# in Python 3.7+, so the local is named finished_async.)", "finished_async", "=", "get_current_async", "(", ")", "# Pull out the result data and log it.", "for", "result", "in", "finished_async", ".", "result", ":", "logging", ".", "info", "(", "result", ")" ]
This is the callback that is run once the Async task is finished. It takes the output from grep and logs it.
[ "This", "is", "the", "callback", "that", "is", "run", "once", "the", "Async", "task", "is", "finished", ".", "It", "takes", "the", "output", "from", "grep", "and", "logs", "it", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/grep.py#L47-L57
Workiva/furious
example/grep.py
build_and_start
def build_and_start(query, directory): """This function will create and then start a new Async task with the default callbacks argument defined in the decorator.""" Async(target=grep, args=[query, directory]).start()
python
def build_and_start(query, directory): """This function will create and then start a new Async task with the default callbacks argument defined in the decorator.""" Async(target=grep, args=[query, directory]).start()
[ "def", "build_and_start", "(", "query", ",", "directory", ")", ":", "Async", "(", "target", "=", "grep", ",", "args", "=", "[", "query", ",", "directory", "]", ")", ".", "start", "(", ")" ]
This function will create and then start a new Async task with the default callbacks argument defined in the decorator.
[ "This", "function", "will", "create", "and", "then", "start", "a", "new", "Async", "task", "with", "the", "default", "callbacks", "argument", "defined", "in", "the", "decorator", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/grep.py#L60-L64
Workiva/furious
example/grep.py
grep_file
def grep_file(query, item): """This function performs the actual grep on a given file.""" return ['%s: %s' % (item, line) for line in open(item) if re.search(query, line)]
python
def grep_file(query, item): """This function performs the actual grep on a given file.""" return ['%s: %s' % (item, line) for line in open(item) if re.search(query, line)]
[ "def", "grep_file", "(", "query", ",", "item", ")", ":", "return", "[", "'%s: %s'", "%", "(", "item", ",", "line", ")", "for", "line", "in", "open", "(", "item", ")", "if", "re", ".", "search", "(", "query", ",", "line", ")", "]" ]
This function performs the actual grep on a given file.
[ "This", "function", "performs", "the", "actual", "grep", "on", "a", "given", "file", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/grep.py#L67-L70
Workiva/furious
example/grep.py
grep
def grep(query, directory): """This function will search through the directory structure of the application and for each directory it finds it launches an Async task to run itself. For each .py file it finds, it actually greps the file and then returns the found output.""" dir_contents = os.listdir(directory) results = [] for item in dir_contents: path = os.path.join(directory, item) if os.path.isdir(path): build_and_start(query, path) else: if item.endswith('.py'): results.extend(grep_file(query, path)) return results
python
def grep(query, directory): """This function will search through the directory structure of the application and for each directory it finds it launches an Async task to run itself. For each .py file it finds, it actually greps the file and then returns the found output.""" dir_contents = os.listdir(directory) results = [] for item in dir_contents: path = os.path.join(directory, item) if os.path.isdir(path): build_and_start(query, path) else: if item.endswith('.py'): results.extend(grep_file(query, path)) return results
[ "def", "grep", "(", "query", ",", "directory", ")", ":", "dir_contents", "=", "os", ".", "listdir", "(", "directory", ")", "results", "=", "[", "]", "for", "item", "in", "dir_contents", ":", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "build_and_start", "(", "query", ",", "path", ")", "else", ":", "if", "item", ".", "endswith", "(", "'.py'", ")", ":", "results", ".", "extend", "(", "grep_file", "(", "query", ",", "path", ")", ")", "return", "results" ]
This function will search through the directory structure of the application and for each directory it finds it launches an Async task to run itself. For each .py file it finds, it actually greps the file and then returns the found output.
[ "This", "function", "will", "search", "through", "the", "directory", "structure", "of", "the", "application", "and", "for", "each", "directory", "it", "finds", "it", "launches", "an", "Async", "task", "to", "run", "itself", ".", "For", "each", ".", "py", "file", "it", "finds", "it", "actually", "greps", "the", "file", "and", "then", "returns", "the", "found", "output", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/grep.py#L74-L89
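grep above fans out one Async per subdirectory and greps the .py files in the current one. The sequential core is just a walk plus a line filter; a self-contained version that runs without furious:

import os
import re

def grep_local(query, directory):
    # Same filter as grep_file above, applied over a full recursive walk.
    matches = []
    for root, _dirs, files in os.walk(directory):
        for name in files:
            if not name.endswith('.py'):
                continue
            path = os.path.join(root, name)
            with open(path) as handle:
                matches.extend('%s: %s' % (path, line)
                               for line in handle if re.search(query, line))
    return matches

# Example: grep_local(r'def\s+grep', '.')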
Workiva/furious
furious/config.py
_get_configured_module
def _get_configured_module(option_name, known_modules=None):
    """Get the module specified by the value of option_name.

    The value of the configuration option will be used to load the module by
    name from the known module list or treated as a path if not found in
    known_modules.

    Args:
        option_name: name of persistence module
        known_modules: dictionary of module names and module paths,
        ie: {'ndb':'furious.extras.appengine.ndb_persistence'}
    Returns:
        module of the module path matching the name in known_modules
    """
    from furious.job_utils import path_to_reference

    config = get_config()

    option_value = config[option_name]

    # If no known_modules were given, make it an empty dict.
    if not known_modules:
        known_modules = {}

    module_path = known_modules.get(option_value) or option_value
    return path_to_reference(module_path)
python
def _get_configured_module(option_name, known_modules=None):
    """Get the module specified by the value of option_name.

    The value of the configuration option will be used to load the module by
    name from the known module list or treated as a path if not found in
    known_modules.

    Args:
        option_name: name of persistence module
        known_modules: dictionary of module names and module paths,
        ie: {'ndb':'furious.extras.appengine.ndb_persistence'}
    Returns:
        module of the module path matching the name in known_modules
    """
    from furious.job_utils import path_to_reference

    config = get_config()

    option_value = config[option_name]

    # If no known_modules were given, make it an empty dict.
    if not known_modules:
        known_modules = {}

    module_path = known_modules.get(option_value) or option_value
    return path_to_reference(module_path)
[ "def", "_get_configured_module", "(", "option_name", ",", "known_modules", "=", "None", ")", ":", "from", "furious", ".", "job_utils", "import", "path_to_reference", "config", "=", "get_config", "(", ")", "option_value", "=", "config", "[", "option_name", "]", "# If no known_modules were given, make it an empty dict.", "if", "not", "known_modules", ":", "known_modules", "=", "{", "}", "module_path", "=", "known_modules", ".", "get", "(", "option_value", ")", "or", "option_value", "return", "path_to_reference", "(", "module_path", ")" ]
Get the module specified by the value of option_name. The value of the configuration option will be used to load the module by name from the known module list or treated as a path if not found in known_modules. Args: option_name: name of persistence module known_modules: dictionary of module names and module paths, ie: {'ndb':'furious.extras.appengine.ndb_persistence'} Returns: module of the module path matching the name in known_modules
[ "Get", "the", "module", "specified", "by", "the", "value", "of", "option_name", ".", "The", "value", "of", "the", "configuration", "option", "will", "be", "used", "to", "load", "the", "module", "by", "name", "from", "the", "known", "module", "list", "or", "treated", "as", "a", "path", "if", "not", "found", "in", "known_modules", ".", "Args", ":", "option_name", ":", "name", "of", "persistence", "module", "known_modules", ":", "dictionary", "of", "module", "names", "and", "module", "paths", "ie", ":", "{", "ndb", ":", "furious", ".", "extras", ".", "appengine", ".", "ndb_persistence", "}", "Returns", ":", "module", "of", "the", "module", "path", "matching", "the", "name", "in", "known_modules" ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L74-L95
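The lookup above tries the option value as a friendly alias into known_modules first and falls back to treating it as a dotted path. A runnable sketch of that resolution (the alias table here is illustrative; the real helper, path_to_reference, also resolves attributes, not just modules):

import importlib

def load_option(option_value, known_modules):
    # Prefer the alias; otherwise treat the value as a module path itself.
    module_path = known_modules.get(option_value) or option_value
    return importlib.import_module(module_path)

known = {'fast_json': 'json'}  # alias -> module path
assert load_option('fast_json', known) is importlib.import_module('json')
assert load_option('os.path', {}) is importlib.import_module('os.path')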
Workiva/furious
furious/config.py
find_furious_yaml
def find_furious_yaml(config_file=__file__): """ Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found """ checked = set() result = _find_furious_yaml(os.path.dirname(config_file), checked) if not result: result = _find_furious_yaml(os.getcwd(), checked) return result
python
def find_furious_yaml(config_file=__file__): """ Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found """ checked = set() result = _find_furious_yaml(os.path.dirname(config_file), checked) if not result: result = _find_furious_yaml(os.getcwd(), checked) return result
[ "def", "find_furious_yaml", "(", "config_file", "=", "__file__", ")", ":", "checked", "=", "set", "(", ")", "result", "=", "_find_furious_yaml", "(", "os", ".", "path", ".", "dirname", "(", "config_file", ")", ",", "checked", ")", "if", "not", "result", ":", "result", "=", "_find_furious_yaml", "(", "os", ".", "getcwd", "(", ")", ",", "checked", ")", "return", "result" ]
Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found
[ "Traverse", "directory", "trees", "to", "find", "a", "furious", ".", "yaml", "file" ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L98-L115
Workiva/furious
furious/config.py
_find_furious_yaml
def _find_furious_yaml(start, checked): """Traverse the directory tree identified by start until a directory already in checked is encountered or the path of furious.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the furious.yaml file or None if it is not found """ directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in FURIOUS_YAML_NAMES: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return None
python
def _find_furious_yaml(start, checked): """Traverse the directory tree identified by start until a directory already in checked is encountered or the path of furious.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the furious.yaml file or None if it is not found """ directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in FURIOUS_YAML_NAMES: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return None
[ "def", "_find_furious_yaml", "(", "start", ",", "checked", ")", ":", "directory", "=", "start", "while", "directory", "not", "in", "checked", ":", "checked", ".", "add", "(", "directory", ")", "for", "fs_yaml_name", "in", "FURIOUS_YAML_NAMES", ":", "yaml_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "fs_yaml_name", ")", "if", "os", ".", "path", ".", "exists", "(", "yaml_path", ")", ":", "return", "yaml_path", "directory", "=", "os", ".", "path", ".", "dirname", "(", "directory", ")", "return", "None" ]
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of furious.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the furious.yaml file or None if it is not found
[ "Traverse", "the", "directory", "tree", "identified", "by", "start", "until", "a", "directory", "already", "in", "checked", "is", "encountered", "or", "the", "path", "of", "furious", ".", "yaml", "is", "found", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L118-L142
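The pair above climbs from a start directory toward the filesystem root, stopping when a directory repeats (at the root, dirname is a fixed point) or the config file turns up. A compact equivalent for any filename:

import os

def find_upward(filename, start):
    # Walk parent directories until the path stops changing (the root).
    directory = os.path.abspath(start)
    while True:
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return candidate
        parent = os.path.dirname(directory)
        if parent == directory:  # reached the root; same role as `checked`
            return None
        directory = parent

# Example: find_upward('setup.py', os.getcwd())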
Workiva/furious
furious/config.py
_load_yaml_config
def _load_yaml_config(path=None): """Open and return the yaml contents.""" furious_yaml_path = path or find_furious_yaml() if furious_yaml_path is None: logging.debug("furious.yaml not found.") return None with open(furious_yaml_path) as yaml_file: return yaml_file.read()
python
def _load_yaml_config(path=None): """Open and return the yaml contents.""" furious_yaml_path = path or find_furious_yaml() if furious_yaml_path is None: logging.debug("furious.yaml not found.") return None with open(furious_yaml_path) as yaml_file: return yaml_file.read()
[ "def", "_load_yaml_config", "(", "path", "=", "None", ")", ":", "furious_yaml_path", "=", "path", "or", "find_furious_yaml", "(", ")", "if", "furious_yaml_path", "is", "None", ":", "logging", ".", "debug", "(", "\"furious.yaml not found.\"", ")", "return", "None", "with", "open", "(", "furious_yaml_path", ")", "as", "yaml_file", ":", "return", "yaml_file", ".", "read", "(", ")" ]
Open and return the yaml contents.
[ "Open", "and", "return", "the", "yaml", "contents", "." ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L161-L169
Workiva/furious
furious/config.py
_parse_yaml_config
def _parse_yaml_config(config_data=None):
    """
    Gets the configuration from the found furious.yaml file
    and parses the data.
    Returns:
        a dictionary parsed from the yaml file
    """
    data_map = default_config()

    # If we were given config data to use, use it. Otherwise, see if there is
    # a furious.yaml to read the config from. Note that the empty string will
    # result in the default config being used.
    if config_data is None:
        config_data = _load_yaml_config()

    if not config_data:
        logging.debug("No custom furious config, using default config.")
        return data_map

    # TODO: validate the yaml contents
    config = yaml.safe_load(config_data)

    # If there was a valid custom config, it will be a dict. Otherwise,
    # ignore it.
    if isinstance(config, dict):
        # Apply the custom config over the default config. This allows us to
        # extend functionality without breaking old stuff.
        data_map.update(config)

    elif config is not None:
        raise InvalidYamlFile("The furious.yaml file "
                              "is invalid yaml")

    return data_map
python
def _parse_yaml_config(config_data=None):
    """
    Gets the configuration from the found furious.yaml file
    and parses the data.
    Returns:
        a dictionary parsed from the yaml file
    """
    data_map = default_config()

    # If we were given config data to use, use it. Otherwise, see if there is
    # a furious.yaml to read the config from. Note that the empty string will
    # result in the default config being used.
    if config_data is None:
        config_data = _load_yaml_config()

    if not config_data:
        logging.debug("No custom furious config, using default config.")
        return data_map

    # TODO: validate the yaml contents
    config = yaml.safe_load(config_data)

    # If there was a valid custom config, it will be a dict. Otherwise,
    # ignore it.
    if isinstance(config, dict):
        # Apply the custom config over the default config. This allows us to
        # extend functionality without breaking old stuff.
        data_map.update(config)

    elif config is not None:
        raise InvalidYamlFile("The furious.yaml file "
                              "is invalid yaml")

    return data_map
[ "def", "_parse_yaml_config", "(", "config_data", "=", "None", ")", ":", "data_map", "=", "default_config", "(", ")", "# If we were given config data to use, use it. Otherwise, see if there is", "# a furious.yaml to read the config from. Note that the empty string will", "# result in the default config being used.", "if", "config_data", "is", "None", ":", "config_data", "=", "_load_yaml_config", "(", ")", "if", "not", "config_data", ":", "logging", ".", "debug", "(", "\"No custom furious config, using default config.\"", ")", "return", "data_map", "# TODO: validate the yaml contents", "config", "=", "yaml", ".", "safe_load", "(", "config_data", ")", "# If there was a valid custom config, it will be a dict. Otherwise,", "# ignore it.", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "# Apply the custom config over the default config. This allows us to", "# extend functionality without breaking old stuff.", "data_map", ".", "update", "(", "config", ")", "elif", "config", "is", "not", "None", ":", "raise", "InvalidYamlFile", "(", "\"The furious.yaml file \"", "\"is invalid yaml\"", ")", "return", "data_map" ]
Gets the configuration from the found furious.yaml file and parses the data. Returns: a dictionary parsed from the yaml file
[ "Gets", "the", "configuration", "from", "the", "found", "furious", ".", "yaml", "file", "and", "parses", "the", "data", ".", "Returns", ":", "a", "dictionary", "parsed", "from", "the", "yaml", "file" ]
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L172-L204
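The overlay step above is a plain dict.update, so unknown keys extend the defaults and known keys override them. A minimal demonstration (requires PyYAML; the keys shown are illustrative, not furious's real schema):

import yaml

defaults = {'persistence': 'ndb', 'log_level': 'info'}
custom = yaml.safe_load("log_level: debug\nnew_option: 1\n")

config = dict(defaults)
if isinstance(custom, dict):
    config.update(custom)  # custom values win on overlapping keys

assert config == {'persistence': 'ndb', 'log_level': 'debug', 'new_option': 1}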
horejsek/python-sqlpuzzle
sqlpuzzle/_queryparts/tables.py
Table._minimize_joins
def _minimize_joins(self):
    """
    Minimize joins. A left/right join and an inner join on the same
    condition collapse into a single inner join.
    """
    joins_group = []
    for join in self._joins:
        append_new = True
        for join_group in joins_group:
            if join_group[0]['table'] == join['table'] and join_group[0]['ons'] == join['ons']:
                join_group.append(join)
                append_new = False
                break
        if append_new:
            joins_group.append([join])

    self._joins = []
    for joins in joins_group:
        if len(joins) > 1 and any(bool(join['type'] == INNER_JOIN) for join in joins):
            joins[0]['type'] = INNER_JOIN
            self._joins.append(joins[0])
        elif len(joins) > 1 and all(join['type'] == joins[0]['type'] for join in joins):
            self._joins.append(joins[0])
        else:
            self._joins.extend(joins)
python
def _minimize_joins(self):
    """
    Minimize joins. A left/right join and an inner join on the same
    condition collapse into a single inner join.
    """
    joins_group = []
    for join in self._joins:
        append_new = True
        for join_group in joins_group:
            if join_group[0]['table'] == join['table'] and join_group[0]['ons'] == join['ons']:
                join_group.append(join)
                append_new = False
                break
        if append_new:
            joins_group.append([join])

    self._joins = []
    for joins in joins_group:
        if len(joins) > 1 and any(bool(join['type'] == INNER_JOIN) for join in joins):
            joins[0]['type'] = INNER_JOIN
            self._joins.append(joins[0])
        elif len(joins) > 1 and all(join['type'] == joins[0]['type'] for join in joins):
            self._joins.append(joins[0])
        else:
            self._joins.extend(joins)
[ "def", "_minimize_joins", "(", "self", ")", ":", "joins_group", "=", "[", "]", "for", "join", "in", "self", ".", "_joins", ":", "append_new", "=", "True", "for", "join_group", "in", "joins_group", ":", "if", "join_group", "[", "0", "]", "[", "'table'", "]", "==", "join", "[", "'table'", "]", "and", "join_group", "[", "0", "]", "[", "'ons'", "]", "==", "join", "[", "'ons'", "]", ":", "join_group", ".", "append", "(", "join", ")", "append_new", "=", "False", "break", "if", "append_new", ":", "joins_group", ".", "append", "(", "[", "join", "]", ")", "self", ".", "_joins", "=", "[", "]", "for", "joins", "in", "joins_group", ":", "if", "len", "(", "joins", ")", ">", "1", "and", "any", "(", "bool", "(", "join", "[", "'type'", "]", "==", "INNER_JOIN", ")", "for", "join", "in", "joins", ")", ":", "joins", "[", "0", "]", "[", "'type'", "]", "=", "INNER_JOIN", "self", ".", "_joins", ".", "append", "(", "joins", "[", "0", "]", ")", "elif", "len", "(", "joins", ")", ">", "1", "and", "all", "(", "join", "[", "'type'", "]", "==", "joins", "[", "0", "]", "[", "'type'", "]", "for", "join", "in", "joins", ")", ":", "self", ".", "_joins", ".", "append", "(", "joins", "[", "0", "]", ")", "else", ":", "self", ".", "_joins", ".", "extend", "(", "joins", ")" ]
Minimize joins. A left/right join and an inner join on the same condition collapse into a single inner join.
[ "Minimize", "joins", ".", "A", "left", "/", "right", "join", "and", "an", "inner", "join", "on", "the", "same", "condition", "collapse", "into", "a", "single", "inner", "join", "." ]
train
https://github.com/horejsek/python-sqlpuzzle/blob/d3a42ed1b339b8eafddb8d2c28a3a5832b3998dd/sqlpuzzle/_queryparts/tables.py#L143-L167
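The pass above groups joins by table and condition, keeps one join per group, and promotes the survivor to an inner join when the group mixes a left/right join with an inner one. A simplified, self-contained rerun of that rule on plain dicts (the constants are stand-ins for sqlpuzzle's, and unlike the original this version always keeps a single join per group):

INNER_JOIN, LEFT_JOIN = 'INNER JOIN', 'LEFT JOIN'

def minimize(joins):
    # Group joins by (table, condition); keep one per group and promote
    # the survivor to INNER JOIN when the group contains one.
    groups = {}
    for join in joins:
        groups.setdefault((join['table'], join['ons']), []).append(join)
    result = []
    for group in groups.values():
        keep = dict(group[0])
        if len(group) > 1 and any(j['type'] == INNER_JOIN for j in group):
            keep['type'] = INNER_JOIN
        result.append(keep)
    return result

joins = [{'table': 't', 'ons': 'a=b', 'type': LEFT_JOIN},
         {'table': 't', 'ons': 'a=b', 'type': INNER_JOIN}]
assert minimize(joins) == [{'table': 't', 'ons': 'a=b', 'type': INNER_JOIN}]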
eumis/pyviews
pyviews/code.py
run_code
def run_code(node: Code, parent_node: Node = None, node_globals: InheritedDict = None, **args): #pylint: disable=unused-argument
    '''Executes node content as python module and adds its definitions to globals'''
    if not node.xml_node.text:
        return
    code = node.xml_node.text
    try:
        globs = node_globals.to_dictionary()
        exec(dedent(code), globs) #pylint: disable=exec-used
        definitions = [(key, value) for key, value in globs.items() \
                       if key != '__builtins__' and not node_globals.has_key(key)]
        for key, value in definitions:
            parent_node.node_globals[key] = value
    except SyntaxError as err:
        error = _get_compilation_error(code, 'Invalid syntax', err, err.lineno)
        raise error from err
    except:
        info = exc_info()
        cause = info[1]
        line_number = extract_tb(info[2])[-1][1]
        error = _get_compilation_error(code, 'Code execution failed', cause, line_number)
        raise error from cause
python
def run_code(node: Code, parent_node: Node = None, node_globals: InheritedDict = None, **args): #pylint: disable=unused-argument
    '''Executes node content as python module and adds its definitions to globals'''
    if not node.xml_node.text:
        return
    code = node.xml_node.text
    try:
        globs = node_globals.to_dictionary()
        exec(dedent(code), globs) #pylint: disable=exec-used
        definitions = [(key, value) for key, value in globs.items() \
                       if key != '__builtins__' and not node_globals.has_key(key)]
        for key, value in definitions:
            parent_node.node_globals[key] = value
    except SyntaxError as err:
        error = _get_compilation_error(code, 'Invalid syntax', err, err.lineno)
        raise error from err
    except:
        info = exc_info()
        cause = info[1]
        line_number = extract_tb(info[2])[-1][1]
        error = _get_compilation_error(code, 'Code execution failed', cause, line_number)
        raise error from cause
[ "def", "run_code", "(", "node", ":", "Code", ",", "parent_node", ":", "Node", "=", "None", ",", "node_globals", ":", "InheritedDict", "=", "None", ",", "*", "*", "args", ")", ":", "#pylint: disable=unused-argument", "if", "not", "node", ".", "xml_node", ".", "text", ":", "return", "code", "=", "node", ".", "xml_node", ".", "text", "try", ":", "globs", "=", "node_globals", ".", "to_dictionary", "(", ")", "exec", "(", "dedent", "(", "code", ")", ",", "globs", ")", "#pylint: disable=exec-used", "definitions", "=", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "globs", ".", "items", "(", ")", "if", "key", "!=", "'__builtins__'", "and", "not", "node_globals", ".", "has_key", "(", "key", ")", "]", "for", "key", ",", "value", "in", "definitions", ":", "parent_node", ".", "node_globals", "[", "key", "]", "=", "value", "except", "SyntaxError", "as", "err", ":", "error", "=", "_get_compilation_error", "(", "code", ",", "'Invalid syntax'", ",", "err", ",", "err", ".", "lineno", ")", "raise", "error", "from", "err", "except", ":", "info", "=", "exc_info", "(", ")", "cause", "=", "info", "[", "1", "]", "line_number", "=", "extract_tb", "(", "info", "[", "2", "]", ")", "[", "-", "1", "]", "[", "1", "]", "error", "=", "_get_compilation_error", "(", "code", ",", "'Code execution failed'", ",", "cause", ",", "line_number", ")", "raise", "error", "from", "cause" ]
Executes node content as python module and adds its definitions to globals
[ "Executes", "node", "content", "as", "python", "module", "and", "adds", "its", "definitions", "to", "globals" ]
train
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/code.py#L13-L33
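run_code executes the node's text with exec against a copy of the inherited globals, then copies any newly defined top-level names back out to the parent. The core move, stripped of the pyviews node types:

from textwrap import dedent

def run_snippet(source, inherited):
    # Execute source, then harvest the names it defined beyond the inherited ones.
    globs = dict(inherited)
    exec(dedent(source), globs)
    return {key: value for key, value in globs.items()
            if key != '__builtins__' and key not in inherited}

defs = run_snippet('''
    def double(x):
        return x * 2
    answer = double(21)
''', {'helper': min})
assert defs['answer'] == 42 and 'helper' not in defs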
bcj/AttrDict
attrdict/merge.py
merge
def merge(left, right):
    """ Merge two Mapping objects together, combining overlapping Mappings,
    and favoring right-values

    left: The left Mapping object.
    right: The right (favored) Mapping object.

    NOTE: This is not commutative (merge(a,b) != merge(b,a)).
    """
    merged = {}

    left_keys = frozenset(left)
    right_keys = frozenset(right)

    # Items only in the left Mapping
    for key in left_keys - right_keys:
        merged[key] = left[key]

    # Items only in the right Mapping
    for key in right_keys - left_keys:
        merged[key] = right[key]

    # in both
    for key in left_keys & right_keys:
        left_value = left[key]
        right_value = right[key]

        if (isinstance(left_value, Mapping) and
                isinstance(right_value, Mapping)):
            # recursive merge
            merged[key] = merge(left_value, right_value)
        else:
            # overwrite with right value
            merged[key] = right_value

    return merged
python
def merge(left, right):
    """ Merge two Mapping objects together, combining overlapping Mappings,
    and favoring right-values

    left: The left Mapping object.
    right: The right (favored) Mapping object.

    NOTE: This is not commutative (merge(a,b) != merge(b,a)).
    """
    merged = {}

    left_keys = frozenset(left)
    right_keys = frozenset(right)

    # Items only in the left Mapping
    for key in left_keys - right_keys:
        merged[key] = left[key]

    # Items only in the right Mapping
    for key in right_keys - left_keys:
        merged[key] = right[key]

    # in both
    for key in left_keys & right_keys:
        left_value = left[key]
        right_value = right[key]

        if (isinstance(left_value, Mapping) and
                isinstance(right_value, Mapping)):
            # recursive merge
            merged[key] = merge(left_value, right_value)
        else:
            # overwrite with right value
            merged[key] = right_value

    return merged
[ "def", "merge", "(", "left", ",", "right", ")", ":", "merged", "=", "{", "}", "left_keys", "=", "frozenset", "(", "left", ")", "right_keys", "=", "frozenset", "(", "right", ")", "# Items only in the left Mapping", "for", "key", "in", "left_keys", "-", "right_keys", ":", "merged", "[", "key", "]", "=", "left", "[", "key", "]", "# Items only in the right Mapping", "for", "key", "in", "right_keys", "-", "left_keys", ":", "merged", "[", "key", "]", "=", "right", "[", "key", "]", "# in both", "for", "key", "in", "left_keys", "&", "right_keys", ":", "left_value", "=", "left", "[", "key", "]", "right_value", "=", "right", "[", "key", "]", "if", "(", "isinstance", "(", "left_value", ",", "Mapping", ")", "and", "isinstance", "(", "right_value", ",", "Mapping", ")", ")", ":", "# recursive merge", "merged", "[", "key", "]", "=", "merge", "(", "left_value", ",", "right_value", ")", "else", ":", "# overwrite with right value", "merged", "[", "key", "]", "=", "right_value", "return", "merged" ]
Merge two Mapping objects together, combining overlapping Mappings, and favoring right-values

left: The left Mapping object.
right: The right (favored) Mapping object.

NOTE: This is not commutative (merge(a,b) != merge(b,a)).
[ "Merge", "two", "Mapping", "objects", "together", "combining", "overlapping", "Mappings", "and", "favoring", "right", "-", "values" ]
train
https://github.com/bcj/AttrDict/blob/8c1883162178a124ee29144ca7abcd83cbd9d222/attrdict/merge.py#L10-L44
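Behavior check for merge: nested Mappings combine recursively, and scalar conflicts resolve to the right operand, which is why the function is not commutative. Assuming the function above is in scope (attrdict.merge in the released package):

left = {'a': 1, 'nested': {'x': 1, 'y': 2}}
right = {'b': 3, 'nested': {'y': 20, 'z': 30}}

assert merge(left, right) == {
    'a': 1,
    'b': 3,
    'nested': {'x': 1, 'y': 20, 'z': 30},  # right wins on 'y'
}
assert merge(left, right) != merge(right, left)  # not commutative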
bcj/AttrDict
attrdict/default.py
AttrDefault._constructor
def _constructor(cls, mapping, configuration): """ A standardized constructor. """ sequence_type, default_factory, pass_key = configuration return cls(default_factory, mapping, sequence_type=sequence_type, pass_key=pass_key)
python
def _constructor(cls, mapping, configuration): """ A standardized constructor. """ sequence_type, default_factory, pass_key = configuration return cls(default_factory, mapping, sequence_type=sequence_type, pass_key=pass_key)
[ "def", "_constructor", "(", "cls", ",", "mapping", ",", "configuration", ")", ":", "sequence_type", ",", "default_factory", ",", "pass_key", "=", "configuration", "return", "cls", "(", "default_factory", ",", "mapping", ",", "sequence_type", "=", "sequence_type", ",", "pass_key", "=", "pass_key", ")" ]
A standardized constructor.
[ "A", "standardized", "constructor", "." ]
train
https://github.com/bcj/AttrDict/blob/8c1883162178a124ee29144ca7abcd83cbd9d222/attrdict/default.py#L124-L130
bcj/AttrDict
attrdict/dictionary.py
AttrDict._constructor
def _constructor(cls, mapping, configuration): """ A standardized constructor. """ attr = cls(mapping) attr._setattr('_sequence_type', configuration) return attr
python
def _constructor(cls, mapping, configuration): """ A standardized constructor. """ attr = cls(mapping) attr._setattr('_sequence_type', configuration) return attr
[ "def", "_constructor", "(", "cls", ",", "mapping", ",", "configuration", ")", ":", "attr", "=", "cls", "(", "mapping", ")", "attr", ".", "_setattr", "(", "'_sequence_type'", ",", "configuration", ")", "return", "attr" ]
A standardized constructor.
[ "A", "standardized", "constructor", "." ]
train
https://github.com/bcj/AttrDict/blob/8c1883162178a124ee29144ca7abcd83cbd9d222/attrdict/dictionary.py#L53-L60
ruipgil/changepy
changepy/pelt.py
pelt
def pelt(cost, length, pen=None):
    """ PELT algorithm to compute changepoints in time series

    Ported from:
        https://github.com/STOR-i/Changepoints.jl
        https://github.com/rkillick/changepoint/

    Reference:
        Killick R, Fearnhead P, Eckley IA (2012) Optimal detection
            of changepoints with a linear computational cost, JASA
            107(500), 1590-1598

    Args:
        cost (function): cost function, with the following signature,
            (int, int) -> float
            where the first parameter is the start index and the second
            the last index of the segment to compute the cost.
        length (int): Data size
        pen (float, optional): defaults to log(n)
    Returns:
        (:obj:`list` of int): List with the indexes of changepoints
    """
    if pen is None:
        pen = np.log(length)

    F = np.zeros(length + 1)
    R = np.array([0], dtype=int)
    candidates = np.zeros(length + 1, dtype=int)

    F[0] = -pen

    for tstar in range(2, length + 1):
        cpt_cands = R
        seg_costs = np.zeros(len(cpt_cands))
        for i in range(0, len(cpt_cands)):
            seg_costs[i] = cost(cpt_cands[i], tstar)

        F_cost = F[cpt_cands] + seg_costs
        F[tstar], tau = find_min(F_cost, pen)
        candidates[tstar] = cpt_cands[tau]

        ineq_prune = [val < F[tstar] for val in F_cost]
        R = [cpt_cands[j] for j, val in enumerate(ineq_prune) if val]
        R.append(tstar - 1)
        R = np.array(R, dtype=int)

    last = candidates[-1]
    changepoints = [last]
    while last > 0:
        last = candidates[last]
        changepoints.append(last)

    return sorted(changepoints)
python
def pelt(cost, length, pen=None):
    """ PELT algorithm to compute changepoints in time series

    Ported from:
        https://github.com/STOR-i/Changepoints.jl
        https://github.com/rkillick/changepoint/

    Reference:
        Killick R, Fearnhead P, Eckley IA (2012) Optimal detection
            of changepoints with a linear computational cost, JASA
            107(500), 1590-1598

    Args:
        cost (function): cost function, with the following signature,
            (int, int) -> float
            where the first parameter is the start index and the second
            the last index of the segment to compute the cost.
        length (int): Data size
        pen (float, optional): defaults to log(n)
    Returns:
        (:obj:`list` of int): List with the indexes of changepoints
    """
    if pen is None:
        pen = np.log(length)

    F = np.zeros(length + 1)
    R = np.array([0], dtype=int)
    candidates = np.zeros(length + 1, dtype=int)

    F[0] = -pen

    for tstar in range(2, length + 1):
        cpt_cands = R
        seg_costs = np.zeros(len(cpt_cands))
        for i in range(0, len(cpt_cands)):
            seg_costs[i] = cost(cpt_cands[i], tstar)

        F_cost = F[cpt_cands] + seg_costs
        F[tstar], tau = find_min(F_cost, pen)
        candidates[tstar] = cpt_cands[tau]

        ineq_prune = [val < F[tstar] for val in F_cost]
        R = [cpt_cands[j] for j, val in enumerate(ineq_prune) if val]
        R.append(tstar - 1)
        R = np.array(R, dtype=int)

    last = candidates[-1]
    changepoints = [last]
    while last > 0:
        last = candidates[last]
        changepoints.append(last)

    return sorted(changepoints)
[ "def", "pelt", "(", "cost", ",", "length", ",", "pen", "=", "None", ")", ":", "if", "pen", "is", "None", ":", "pen", "=", "np", ".", "log", "(", "length", ")", "F", "=", "np", ".", "zeros", "(", "length", "+", "1", ")", "R", "=", "np", ".", "array", "(", "[", "0", "]", ",", "dtype", "=", "int", ")", "candidates", "=", "np", ".", "zeros", "(", "length", "+", "1", ",", "dtype", "=", "int", ")", "F", "[", "0", "]", "=", "-", "pen", "for", "tstar", "in", "range", "(", "2", ",", "length", "+", "1", ")", ":", "cpt_cands", "=", "R", "seg_costs", "=", "np", ".", "zeros", "(", "len", "(", "cpt_cands", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "cpt_cands", ")", ")", ":", "seg_costs", "[", "i", "]", "=", "cost", "(", "cpt_cands", "[", "i", "]", ",", "tstar", ")", "F_cost", "=", "F", "[", "cpt_cands", "]", "+", "seg_costs", "F", "[", "tstar", "]", ",", "tau", "=", "find_min", "(", "F_cost", ",", "pen", ")", "candidates", "[", "tstar", "]", "=", "cpt_cands", "[", "tau", "]", "ineq_prune", "=", "[", "val", "<", "F", "[", "tstar", "]", "for", "val", "in", "F_cost", "]", "R", "=", "[", "cpt_cands", "[", "j", "]", "for", "j", ",", "val", "in", "enumerate", "(", "ineq_prune", ")", "if", "val", "]", "R", ".", "append", "(", "tstar", "-", "1", ")", "R", "=", "np", ".", "array", "(", "R", ",", "dtype", "=", "int", ")", "last", "=", "candidates", "[", "-", "1", "]", "changepoints", "=", "[", "last", "]", "while", "last", ">", "0", ":", "last", "=", "candidates", "[", "last", "]", "changepoints", ".", "append", "(", "last", ")", "return", "sorted", "(", "changepoints", ")" ]
PELT algorithm to compute changepoints in time series

Ported from:
    https://github.com/STOR-i/Changepoints.jl
    https://github.com/rkillick/changepoint/

Reference:
    Killick R, Fearnhead P, Eckley IA (2012) Optimal detection
        of changepoints with a linear computational cost, JASA
        107(500), 1590-1598

Args:
    cost (function): cost function, with the following signature,
        (int, int) -> float
        where the first parameter is the start index and the second
        the last index of the segment to compute the cost.
    length (int): Data size
    pen (float, optional): defaults to log(n)
Returns:
    (:obj:`list` of int): List with the indexes of changepoints
[ "PELT", "algorithm", "to", "compute", "changepoints", "in", "time", "series" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/pelt.py#L14-L65
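End-to-end use of pelt with the normal_mean cost from changepy/costs.py: simulate a mean shift, then ask for the changepoint indexes. The imports mirror the project's README; note that released changepy builds may still use np.int, which NumPy 1.24 removed, so a recent NumPy can require the dtype=int form shown in the snapshot above:

import numpy as np
from changepy import pelt
from changepy.costs import normal_mean

np.random.seed(0)
var = 0.1
data = np.append(np.random.normal(0.0, var, 100),    # segment with mean 0
                 np.random.normal(10.0, var, 100))   # segment with mean 10
print(pelt(normal_mean(data, var), len(data)))       # expected: [0, 100]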
ruipgil/changepy
changepy/costs.py
normal_mean
def normal_mean(data, variance): """ Creates a segment cost function for a time series with a Normal distribution with changing mean Args: data (:obj:`list` of float): 1D time series data variance (float): variance Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ if not isinstance(data, np.ndarray): data = np.array(data) i_variance_2 = 1 / (variance ** 2) cmm = [0.0] cmm.extend(np.cumsum(data)) cmm2 = [0.0] cmm2.extend(np.cumsum(np.abs(data))) def cost(start, end): """ Cost function for normal distribution with variable mean Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ cmm2_diff = cmm2[end] - cmm2[start] cmm_diff = pow(cmm[end] - cmm[start], 2) i_diff = end - start diff = cmm2_diff - cmm_diff return (diff/i_diff) * i_variance_2 return cost
python
def normal_mean(data, variance): """ Creates a segment cost function for a time series with a Normal distribution with changing mean Args: data (:obj:`list` of float): 1D time series data variance (float): variance Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ if not isinstance(data, np.ndarray): data = np.array(data) i_variance_2 = 1 / (variance ** 2) cmm = [0.0] cmm.extend(np.cumsum(data)) cmm2 = [0.0] cmm2.extend(np.cumsum(np.abs(data))) def cost(start, end): """ Cost function for normal distribution with variable mean Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ cmm2_diff = cmm2[end] - cmm2[start] cmm_diff = pow(cmm[end] - cmm[start], 2) i_diff = end - start diff = cmm2_diff - cmm_diff return (diff/i_diff) * i_variance_2 return cost
[ "def", "normal_mean", "(", "data", ",", "variance", ")", ":", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "np", ".", "array", "(", "data", ")", "i_variance_2", "=", "1", "/", "(", "variance", "**", "2", ")", "cmm", "=", "[", "0.0", "]", "cmm", ".", "extend", "(", "np", ".", "cumsum", "(", "data", ")", ")", "cmm2", "=", "[", "0.0", "]", "cmm2", ".", "extend", "(", "np", ".", "cumsum", "(", "np", ".", "abs", "(", "data", ")", ")", ")", "def", "cost", "(", "start", ",", "end", ")", ":", "\"\"\" Cost function for normal distribution with variable mean\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "cmm2_diff", "=", "cmm2", "[", "end", "]", "-", "cmm2", "[", "start", "]", "cmm_diff", "=", "pow", "(", "cmm", "[", "end", "]", "-", "cmm", "[", "start", "]", ",", "2", ")", "i_diff", "=", "end", "-", "start", "diff", "=", "cmm2_diff", "-", "cmm_diff", "return", "(", "diff", "/", "i_diff", ")", "*", "i_variance_2", "return", "cost" ]
Creates a segment cost function for a time series with a Normal distribution with changing mean Args: data (:obj:`list` of float): 1D time series data variance (float): variance Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "Normal", "distribution", "with", "changing", "mean" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L3-L41
ruipgil/changepy
changepy/costs.py
normal_var
def normal_var(data, mean):
    """ Creates a segment cost function for a time series with a
    Normal distribution with changing variance

    Args:
        data (:obj:`list` of float): 1D time series data
        mean (float): mean
    Returns:
        function: Function with signature
            (int, int) -> float
            where the first arg is the starting index, and the second
            is the last arg. Returns the cost of that segment
    """
    if not isinstance(data, np.ndarray):
        data = np.array(data)

    cumm = [0.0]
    cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))

    def cost(s, t):
        """ Cost function for normal distribution with variable variance

        Args:
            start (int): start index
            end (int): end index
        Returns:
            float: Cost, from start to end
        """
        dist = float(t - s)
        diff = cumm[t] - cumm[s]
        return dist * np.log(diff/dist)

    return cost
python
def normal_var(data, mean):
    """ Creates a segment cost function for a time series with a
    Normal distribution with changing variance

    Args:
        data (:obj:`list` of float): 1D time series data
        mean (float): mean
    Returns:
        function: Function with signature
            (int, int) -> float
            where the first arg is the starting index, and the second
            is the last arg. Returns the cost of that segment
    """
    if not isinstance(data, np.ndarray):
        data = np.array(data)

    cumm = [0.0]
    cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))

    def cost(s, t):
        """ Cost function for normal distribution with variable variance

        Args:
            start (int): start index
            end (int): end index
        Returns:
            float: Cost, from start to end
        """
        dist = float(t - s)
        diff = cumm[t] - cumm[s]
        return dist * np.log(diff/dist)

    return cost
[ "def", "normal_var", "(", "data", ",", "mean", ")", ":", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "np", ".", "array", "(", "data", ")", "cumm", "=", "[", "0.0", "]", "cumm", ".", "extend", "(", "np", ".", "cumsum", "(", "np", ".", "power", "(", "np", ".", "abs", "(", "data", "-", "mean", ")", ",", "2", ")", ")", ")", "def", "cost", "(", "s", ",", "t", ")", ":", "\"\"\" Cost function for normal distribution with variable variance\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "dist", "=", "float", "(", "t", "-", "s", ")", "diff", "=", "cumm", "[", "t", "]", "-", "cumm", "[", "s", "]", "return", "dist", "*", "np", ".", "log", "(", "diff", "/", "dist", ")", "return", "cost" ]
Creates a segment cost function for a time series with a Normal distribution with changing variance

Args:
    data (:obj:`list` of float): 1D time series data
    mean (float): mean
Returns:
    function: Function with signature
        (int, int) -> float
        where the first arg is the starting index, and the second
        is the last arg. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "Normal", "distribution", "with", "changing", "variance" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L43-L75
ruipgil/changepy
changepy/costs.py
normal_meanvar
def normal_meanvar(data): """ Creates a segment cost function for a time series with a Normal distribution with changing mean and variance Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ data = np.hstack(([0.0], np.array(data))) cumm = np.cumsum(data) cumm_sq = np.cumsum([val**2 for val in data]) def cost(s, t): """ Cost function for normal distribution with variable variance Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ ts_i = 1.0 / (t-s) mu = (cumm[t] - cumm[s]) * ts_i sig = (cumm_sq[t] - cumm_sq[s]) * ts_i - mu**2 sig_i = 1.0 / sig return (t-s) * np.log(sig) + (cumm_sq[t] - cumm_sq[s]) * sig_i - 2*(cumm[t] - cumm[s])*mu*sig_i + ((t-s)*mu**2)*sig_i return cost
python
def normal_meanvar(data): """ Creates a segment cost function for a time series with a Normal distribution with changing mean and variance Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ data = np.hstack(([0.0], np.array(data))) cumm = np.cumsum(data) cumm_sq = np.cumsum([val**2 for val in data]) def cost(s, t): """ Cost function for normal distribution with variable variance Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ ts_i = 1.0 / (t-s) mu = (cumm[t] - cumm[s]) * ts_i sig = (cumm_sq[t] - cumm_sq[s]) * ts_i - mu**2 sig_i = 1.0 / sig return (t-s) * np.log(sig) + (cumm_sq[t] - cumm_sq[s]) * sig_i - 2*(cumm[t] - cumm[s])*mu*sig_i + ((t-s)*mu**2)*sig_i return cost
[ "def", "normal_meanvar", "(", "data", ")", ":", "data", "=", "np", ".", "hstack", "(", "(", "[", "0.0", "]", ",", "np", ".", "array", "(", "data", ")", ")", ")", "cumm", "=", "np", ".", "cumsum", "(", "data", ")", "cumm_sq", "=", "np", ".", "cumsum", "(", "[", "val", "**", "2", "for", "val", "in", "data", "]", ")", "def", "cost", "(", "s", ",", "t", ")", ":", "\"\"\" Cost function for normal distribution with variable variance\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "ts_i", "=", "1.0", "/", "(", "t", "-", "s", ")", "mu", "=", "(", "cumm", "[", "t", "]", "-", "cumm", "[", "s", "]", ")", "*", "ts_i", "sig", "=", "(", "cumm_sq", "[", "t", "]", "-", "cumm_sq", "[", "s", "]", ")", "*", "ts_i", "-", "mu", "**", "2", "sig_i", "=", "1.0", "/", "sig", "return", "(", "t", "-", "s", ")", "*", "np", ".", "log", "(", "sig", ")", "+", "(", "cumm_sq", "[", "t", "]", "-", "cumm_sq", "[", "s", "]", ")", "*", "sig_i", "-", "2", "*", "(", "cumm", "[", "t", "]", "-", "cumm", "[", "s", "]", ")", "*", "mu", "*", "sig_i", "+", "(", "(", "t", "-", "s", ")", "*", "mu", "**", "2", ")", "*", "sig_i", "return", "cost" ]
Creates a segment cost function for a time series with a Normal distribution with changing mean and variance Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "Normal", "distribution", "with", "changing", "mean", "and", "variance" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L77-L109
ruipgil/changepy
changepy/costs.py
poisson
def poisson(data): """ Creates a segment cost function for a time series with a poisson distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ data = np.hstack(([0.0], np.array(data))) cumm = np.cumsum(data) def cost(s, t): """ Cost function for poisson distribution with changing mean Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ diff = cumm[t]-cumm[s] if diff == 0: return -2 * diff * (- np.log(t-s) - 1) else: return -2 * diff * (np.log(diff) - np.log(t-s) - 1) return cost
python
def poisson(data): """ Creates a segment cost function for a time series with a poisson distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment """ data = np.hstack(([0.0], np.array(data))) cumm = np.cumsum(data) def cost(s, t): """ Cost function for poisson distribution with changing mean Args: start (int): start index end (int): end index Returns: float: Cost, from start to end """ diff = cumm[t]-cumm[s] if diff == 0: return -2 * diff * (- np.log(t-s) - 1) else: return -2 * diff * (np.log(diff) - np.log(t-s) - 1) return cost
[ "def", "poisson", "(", "data", ")", ":", "data", "=", "np", ".", "hstack", "(", "(", "[", "0.0", "]", ",", "np", ".", "array", "(", "data", ")", ")", ")", "cumm", "=", "np", ".", "cumsum", "(", "data", ")", "def", "cost", "(", "s", ",", "t", ")", ":", "\"\"\" Cost function for poisson distribution with changing mean\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "diff", "=", "cumm", "[", "t", "]", "-", "cumm", "[", "s", "]", "if", "diff", "==", "0", ":", "return", "-", "2", "*", "diff", "*", "(", "-", "np", ".", "log", "(", "t", "-", "s", ")", "-", "1", ")", "else", ":", "return", "-", "2", "*", "diff", "*", "(", "np", ".", "log", "(", "diff", ")", "-", "np", ".", "log", "(", "t", "-", "s", ")", "-", "1", ")", "return", "cost" ]
Creates a segment cost function for a time series with a poisson distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "poisson", "distribution", "with", "changing", "mean" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L111-L141
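The count-data analogue: the poisson cost plugs into pelt the same way, since pelt only ever calls cost(start, end). A sketch with a simulated rate change (same import caveats as the pelt example above):

import numpy as np
from changepy import pelt
from changepy.costs import poisson

np.random.seed(1)
counts = np.append(np.random.poisson(1, 100),   # rate 1 segment
                   np.random.poisson(8, 100))   # rate 8 segment
print(pelt(poisson(counts), len(counts)))       # changepoint expected near 100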
ruipgil/changepy
changepy/costs.py
exponential
def exponential(data):
    """ Creates a segment cost function for a time series with an
    exponential distribution with changing mean

    Args:
        data (:obj:`list` of float): 1D time series data
    Returns:
        function: Function with signature
            (int, int) -> float
            where the first arg is the starting index, and the second
            is the last arg. Returns the cost of that segment
    """
    data = np.hstack(([0.0], np.array(data)))
    cumm = np.cumsum(data)

    def cost(s, t):
        """ Cost function for exponential distribution with changing mean

        Args:
            start (int): start index
            end (int): end index
        Returns:
            float: Cost, from start to end
        """
        return -1*(t-s) * (np.log(t-s) - np.log(cumm[t] - cumm[s]))

    return cost
python
def exponential(data):
    """ Creates a segment cost function for a time series with an
    exponential distribution with changing mean

    Args:
        data (:obj:`list` of float): 1D time series data
    Returns:
        function: Function with signature
            (int, int) -> float
            where the first arg is the starting index, and the second
            is the last arg. Returns the cost of that segment
    """
    data = np.hstack(([0.0], np.array(data)))
    cumm = np.cumsum(data)

    def cost(s, t):
        """ Cost function for exponential distribution with changing mean

        Args:
            start (int): start index
            end (int): end index
        Returns:
            float: Cost, from start to end
        """
        return -1*(t-s) * (np.log(t-s) - np.log(cumm[t] - cumm[s]))

    return cost
[ "def", "exponential", "(", "data", ")", ":", "data", "=", "np", ".", "hstack", "(", "(", "[", "0.0", "]", ",", "np", ".", "array", "(", "data", ")", ")", ")", "cumm", "=", "np", ".", "cumsum", "(", "data", ")", "def", "cost", "(", "s", ",", "t", ")", ":", "\"\"\" Cost function for exponential distribution with changing mean\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "return", "-", "1", "*", "(", "t", "-", "s", ")", "*", "(", "np", ".", "log", "(", "t", "-", "s", ")", "-", "np", ".", "log", "(", "cumm", "[", "t", "]", "-", "cumm", "[", "s", "]", ")", ")", "return", "cost" ]
Creates a segment cost function for a time series with an
    exponential distribution with changing mean

    Args:
        data (:obj:`list` of float): 1D time series data
    Returns:
        function: Function with signature
            (int, int) -> float
            where the first arg is the starting index, and the second
            is the ending index. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "exponential", "distribution", "with", "changing", "mean" ]
train
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L143-L169
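To see the cost behave as intended, a quick hedged check (same importability assumption as for the poisson factory above; the data is synthetic):

import numpy as np
from changepy.costs import exponential

rng = np.random.default_rng(1)
data = np.concatenate([rng.exponential(1.0, 50), rng.exponential(5.0, 50)])
cost = exponential(data)
# Splitting at the true changepoint should be cheaper than one long segment
print(cost(0, 100))                 # single-segment cost
print(cost(0, 50) + cost(50, 100))  # two-segment cost, typically smaller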
openaddresses/pyesridump
esridump/dumper.py
EsriDumper._get_layer_min_max
def _get_layer_min_max(self, oid_field_name): """ Find the min and max values for the OID field. """ query_args = self._build_query_args({ 'f': 'json', 'outFields': '', 'outStatistics': json.dumps([ dict(statisticType='min', onStatisticField=oid_field_name, outStatisticFieldName='THE_MIN'), dict(statisticType='max', onStatisticField=oid_field_name, outStatisticFieldName='THE_MAX'), ], separators=(',', ':')) }) headers = self._build_headers() url = self._build_url('/query') response = self._request('GET', url, params=query_args, headers=headers) metadata = self._handle_esri_errors(response, "Could not retrieve min/max oid values") # Some servers (specifically version 10.11, it seems) will respond with SQL statements # for the attribute names rather than the requested field names, so pick the min and max # deliberately rather than relying on the names. min_max_values = metadata['features'][0]['attributes'].values() min_value = min(min_max_values) max_value = max(min_max_values) query_args = self._build_query_args({ 'f': 'json', 'outFields': '*', 'outStatistics': json.dumps([ dict(statisticType='min', onStatisticField=oid_field_name, outStatisticFieldName='THE_MIN'), dict(statisticType='max', onStatisticField=oid_field_name, outStatisticFieldName='THE_MAX'), ], separators=(',', ':')) }) query_args = self._build_query_args({ 'where': '{} = {} OR {} = {}'.format( oid_field_name, min_value, oid_field_name, max_value ), 'returnIdsOnly': 'true', 'f': 'json', }) headers = self._build_headers() url = self._build_url('/query') response = self._request('GET', url, params=query_args, headers=headers) oid_data = self._handle_esri_errors(response, "Could not check min/max values") if not oid_data or not oid_data.get('objectIds') or min_value not in oid_data['objectIds'] or max_value not in oid_data['objectIds']: raise EsriDownloadError('Server returned invalid min/max') return (min_value, max_value)
python
def _get_layer_min_max(self, oid_field_name): """ Find the min and max values for the OID field. """ query_args = self._build_query_args({ 'f': 'json', 'outFields': '', 'outStatistics': json.dumps([ dict(statisticType='min', onStatisticField=oid_field_name, outStatisticFieldName='THE_MIN'), dict(statisticType='max', onStatisticField=oid_field_name, outStatisticFieldName='THE_MAX'), ], separators=(',', ':')) }) headers = self._build_headers() url = self._build_url('/query') response = self._request('GET', url, params=query_args, headers=headers) metadata = self._handle_esri_errors(response, "Could not retrieve min/max oid values") # Some servers (specifically version 10.11, it seems) will respond with SQL statements # for the attribute names rather than the requested field names, so pick the min and max # deliberately rather than relying on the names. min_max_values = metadata['features'][0]['attributes'].values() min_value = min(min_max_values) max_value = max(min_max_values) query_args = self._build_query_args({ 'f': 'json', 'outFields': '*', 'outStatistics': json.dumps([ dict(statisticType='min', onStatisticField=oid_field_name, outStatisticFieldName='THE_MIN'), dict(statisticType='max', onStatisticField=oid_field_name, outStatisticFieldName='THE_MAX'), ], separators=(',', ':')) }) query_args = self._build_query_args({ 'where': '{} = {} OR {} = {}'.format( oid_field_name, min_value, oid_field_name, max_value ), 'returnIdsOnly': 'true', 'f': 'json', }) headers = self._build_headers() url = self._build_url('/query') response = self._request('GET', url, params=query_args, headers=headers) oid_data = self._handle_esri_errors(response, "Could not check min/max values") if not oid_data or not oid_data.get('objectIds') or min_value not in oid_data['objectIds'] or max_value not in oid_data['objectIds']: raise EsriDownloadError('Server returned invalid min/max') return (min_value, max_value)
[ "def", "_get_layer_min_max", "(", "self", ",", "oid_field_name", ")", ":", "query_args", "=", "self", ".", "_build_query_args", "(", "{", "'f'", ":", "'json'", ",", "'outFields'", ":", "''", ",", "'outStatistics'", ":", "json", ".", "dumps", "(", "[", "dict", "(", "statisticType", "=", "'min'", ",", "onStatisticField", "=", "oid_field_name", ",", "outStatisticFieldName", "=", "'THE_MIN'", ")", ",", "dict", "(", "statisticType", "=", "'max'", ",", "onStatisticField", "=", "oid_field_name", ",", "outStatisticFieldName", "=", "'THE_MAX'", ")", ",", "]", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "}", ")", "headers", "=", "self", ".", "_build_headers", "(", ")", "url", "=", "self", ".", "_build_url", "(", "'/query'", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "url", ",", "params", "=", "query_args", ",", "headers", "=", "headers", ")", "metadata", "=", "self", ".", "_handle_esri_errors", "(", "response", ",", "\"Could not retrieve min/max oid values\"", ")", "# Some servers (specifically version 10.11, it seems) will respond with SQL statements", "# for the attribute names rather than the requested field names, so pick the min and max", "# deliberately rather than relying on the names.", "min_max_values", "=", "metadata", "[", "'features'", "]", "[", "0", "]", "[", "'attributes'", "]", ".", "values", "(", ")", "min_value", "=", "min", "(", "min_max_values", ")", "max_value", "=", "max", "(", "min_max_values", ")", "query_args", "=", "self", ".", "_build_query_args", "(", "{", "'f'", ":", "'json'", ",", "'outFields'", ":", "'*'", ",", "'outStatistics'", ":", "json", ".", "dumps", "(", "[", "dict", "(", "statisticType", "=", "'min'", ",", "onStatisticField", "=", "oid_field_name", ",", "outStatisticFieldName", "=", "'THE_MIN'", ")", ",", "dict", "(", "statisticType", "=", "'max'", ",", "onStatisticField", "=", "oid_field_name", ",", "outStatisticFieldName", "=", "'THE_MAX'", ")", ",", "]", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "}", ")", "query_args", "=", "self", ".", "_build_query_args", "(", "{", "'where'", ":", "'{} = {} OR {} = {}'", ".", "format", "(", "oid_field_name", ",", "min_value", ",", "oid_field_name", ",", "max_value", ")", ",", "'returnIdsOnly'", ":", "'true'", ",", "'f'", ":", "'json'", ",", "}", ")", "headers", "=", "self", ".", "_build_headers", "(", ")", "url", "=", "self", ".", "_build_url", "(", "'/query'", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "url", ",", "params", "=", "query_args", ",", "headers", "=", "headers", ")", "oid_data", "=", "self", ".", "_handle_esri_errors", "(", "response", ",", "\"Could not check min/max values\"", ")", "if", "not", "oid_data", "or", "not", "oid_data", ".", "get", "(", "'objectIds'", ")", "or", "min_value", "not", "in", "oid_data", "[", "'objectIds'", "]", "or", "max_value", "not", "in", "oid_data", "[", "'objectIds'", "]", ":", "raise", "EsriDownloadError", "(", "'Server returned invalid min/max'", ")", "return", "(", "min_value", ",", "max_value", ")" ]
Find the min and max values for the OID field.
[ "Find", "the", "min", "and", "max", "values", "for", "the", "OID", "field", "." ]
train
https://github.com/openaddresses/pyesridump/blob/378155816559134b8d2b3de0d0f2fddc74f23fcd/esridump/dumper.py#L166-L211
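The same two-step check, written as a hedged standalone sketch against a hypothetical ArcGIS REST layer URL (the real method routes these calls through the dumper's own request helpers):

import json
import requests

layer_url = "https://example.com/arcgis/rest/services/Foo/MapServer/0"  # hypothetical endpoint
oid_field = "OBJECTID"

# Step 1: ask the server for min/max statistics on the OID field
stats = [
    {"statisticType": "min", "onStatisticField": oid_field, "outStatisticFieldName": "THE_MIN"},
    {"statisticType": "max", "onStatisticField": oid_field, "outStatisticFieldName": "THE_MAX"},
]
resp = requests.get(layer_url + "/query", params={
    "f": "json",
    "outFields": "",
    "outStatistics": json.dumps(stats, separators=(",", ":")),
})
attrs = resp.json()["features"][0]["attributes"]
# Pick min/max by value rather than by field name, since some servers
# echo SQL expressions instead of the requested output names
min_oid, max_oid = min(attrs.values()), max(attrs.values())

# Step 2: confirm both OIDs actually exist on the layer
resp = requests.get(layer_url + "/query", params={
    "where": "{0} = {1} OR {0} = {2}".format(oid_field, min_oid, max_oid),
    "returnIdsOnly": "true",
    "f": "json",
})
assert {min_oid, max_oid} <= set(resp.json().get("objectIds") or [])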
openaddresses/pyesridump
esridump/esri2geojson.py
ring_is_clockwise
def ring_is_clockwise(ring):
    """ Determine if polygon ring coordinates are clockwise.

    Clockwise signifies outer ring, counter-clockwise an inner ring
    or hole. This logic was found at
    http://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order

    This code taken from http://esri.github.com/geojson-utils/src/jsonConverters.js by James Cardona (MIT license) """
    total = 0
    for (pt1, pt2) in pairwise(ring):
        total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])
    return total >= 0
python
def ring_is_clockwise(ring):
    """ Determine if polygon ring coordinates are clockwise.

    Clockwise signifies outer ring, counter-clockwise an inner ring
    or hole. This logic was found at
    http://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order

    This code taken from http://esri.github.com/geojson-utils/src/jsonConverters.js by James Cardona (MIT license) """
    total = 0
    for (pt1, pt2) in pairwise(ring):
        total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])
    return total >= 0
[ "def", "ring_is_clockwise", "(", "ring", ")", ":", "total", "=", "0", "for", "(", "pt1", ",", "pt2", ")", "in", "pairwise", "(", "ring", ")", ":", "total", "+=", "(", "pt2", "[", "0", "]", "-", "pt1", "[", "0", "]", ")", "*", "(", "pt2", "[", "1", "]", "+", "pt1", "[", "1", "]", ")", "return", "total", ">=", "0" ]
Determine if polygon ring coordinates are clockwise.

    Clockwise signifies outer ring, counter-clockwise an inner ring
    or hole. This logic was found at
    http://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order

    This code taken from http://esri.github.com/geojson-utils/src/jsonConverters.js by James Cardona (MIT license)
[ "Determine", "if", "polygon", "ring", "coordinates", "are", "clockwise", ".", "Clockwise", "signifies", "outer", "ring", "counter", "-", "clockwise", "an", "inner", "ring", "or", "hole", ".", "this", "logic", "was", "found", "at", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "1165647", "/", "how", "-", "to", "-", "determine", "-", "if", "-", "a", "-", "list", "-", "of", "-", "polygon", "-", "points", "-", "are", "-", "in", "-", "clockwise", "-", "order", "this", "code", "taken", "from", "http", ":", "//", "esri", ".", "github", ".", "com", "/", "geojson", "-", "utils", "/", "src", "/", "jsonConverters", ".", "js", "by", "James", "Cardona", "(", "MIT", "lisense", ")" ]
train
https://github.com/openaddresses/pyesridump/blob/378155816559134b8d2b3de0d0f2fddc74f23fcd/esridump/esri2geojson.py#L126-L136
mdsol/rwslib
rwslib/rws_requests/__init__.py
format_date_argument
def format_date_argument(date_element):
    """
    Take a date as either a datetime.date/datetime or a string and return it as an iso8601 formatted value
    :param Union[datetime.date, datetime.datetime] date_element: passed argument
    :rtype str
    :return:
    """
    if not isinstance(date_element, (datetime.datetime, datetime.date)):
        # TODO:
        if "T" in date_element:
            _date = datetime.datetime.strptime(date_element, "%Y-%m-%dT%H:%M:%S")
        else:
            _date = datetime.datetime.strptime(date_element, "%Y-%m-%d").date()
    else:
        _date = date_element
    return _date.isoformat()
python
def format_date_argument(date_element):
    """
    Take a date as either a datetime.date/datetime or a string and return it as an iso8601 formatted value
    :param Union[datetime.date, datetime.datetime] date_element: passed argument
    :rtype str
    :return:
    """
    if not isinstance(date_element, (datetime.datetime, datetime.date)):
        # TODO:
        if "T" in date_element:
            _date = datetime.datetime.strptime(date_element, "%Y-%m-%dT%H:%M:%S")
        else:
            _date = datetime.datetime.strptime(date_element, "%Y-%m-%d").date()
    else:
        _date = date_element
    return _date.isoformat()
[ "def", "format_date_argument", "(", "date_element", ")", ":", "if", "not", "isinstance", "(", "date_element", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ")", ")", ":", "# TODO:", "if", "\"T\"", "in", "date_element", ":", "_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date_element", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")", "else", ":", "_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date_element", ",", "\"%Y-%m-%d\"", ")", ".", "date", "(", ")", "else", ":", "_date", "=", "date_element", "return", "_date", ".", "isoformat", "(", ")" ]
Take a date as either a datetime.date/datetime or a string and return it as an iso8601 formatted value
:param Union[datetime.date, datetime.datetime] date_element: passed argument
:rtype str
:return:
[ "Take", "a", "date", "as", "either", "a", "datetime", ".", "date", "/", "datetime", "or", "a", "string", "and", "return", "it", "as", "a", "iso8601", "formatted", "value", ":", "param", "Union", "[", "datetime", ".", "date", "datetime", ".", "datetime", "]", "date_element", ":", "passed", "argument", ":", "rtype", "str", ":", "return", ":" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/__init__.py#L35-L50
mdsol/rwslib
rwslib/rws_requests/__init__.py
make_url
def make_url(*args, **kwargs): """Makes a URL from component parts""" base = "/".join(args) if kwargs: return "%s?%s" % (base, urlencode(kwargs)) else: return base
python
def make_url(*args, **kwargs): """Makes a URL from component parts""" base = "/".join(args) if kwargs: return "%s?%s" % (base, urlencode(kwargs)) else: return base
[ "def", "make_url", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "base", "=", "\"/\"", ".", "join", "(", "args", ")", "if", "kwargs", ":", "return", "\"%s?%s\"", "%", "(", "base", ",", "urlencode", "(", "kwargs", ")", ")", "else", ":", "return", "base" ]
Makes a URL from component parts
[ "Makes", "a", "URL", "from", "component", "parts" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/__init__.py#L56-L62
mdsol/rwslib
rwslib/rws_requests/__init__.py
QueryOptionGetRequest._querystring
def _querystring(self): """Get additional keyword arguments""" kw = {} for key in self.KNOWN_QUERY_OPTIONS: val = getattr(self, key) if val is not None: kw[key] = val return kw
python
def _querystring(self): """Get additional keyword arguments""" kw = {} for key in self.KNOWN_QUERY_OPTIONS: val = getattr(self, key) if val is not None: kw[key] = val return kw
[ "def", "_querystring", "(", "self", ")", ":", "kw", "=", "{", "}", "for", "key", "in", "self", ".", "KNOWN_QUERY_OPTIONS", ":", "val", "=", "getattr", "(", "self", ",", "key", ")", "if", "val", "is", "not", "None", ":", "kw", "[", "key", "]", "=", "val", "return", "kw" ]
Get additional keyword arguments
[ "Get", "additional", "keyword", "arguments" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/__init__.py#L130-L138
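The pattern generalizes: each subclass declares which query options it understands, and only the options actually set reach the querystring. A minimal sketch with a hypothetical subclass (not rwslib's real class hierarchy):

class ExampleRequest:  # hypothetical
    KNOWN_QUERY_OPTIONS = ["dataset_format", "start", "rawsuffix"]

    def __init__(self, **kwargs):
        # Accept only the declared options; leave unset ones as None
        for key in self.KNOWN_QUERY_OPTIONS:
            setattr(self, key, kwargs.get(key))

    def _querystring(self):
        kw = {}
        for key in self.KNOWN_QUERY_OPTIONS:
            val = getattr(self, key)
            if val is not None:
                kw[key] = val
        return kw

req = ExampleRequest(dataset_format="csv", start="2024-01-01")
print(req._querystring())  # {'dataset_format': 'csv', 'start': '2024-01-01'}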
mdsol/rwslib
rwslib/rws_requests/__init__.py
StudySubjectsRequest._querystring
def _querystring(self): """Additional keyword arguments""" kw = {} if self.status: kw["status"] = "all" if self.links: kw["links"] = "all" if self.include is not None: kw["include"] = self.include if self.subject_key_type != "SubjectName": kw["subjectKeyType"] = self.subject_key_type return kw
python
def _querystring(self): """Additional keyword arguments""" kw = {} if self.status: kw["status"] = "all" if self.links: kw["links"] = "all" if self.include is not None: kw["include"] = self.include if self.subject_key_type != "SubjectName": kw["subjectKeyType"] = self.subject_key_type return kw
[ "def", "_querystring", "(", "self", ")", ":", "kw", "=", "{", "}", "if", "self", ".", "status", ":", "kw", "[", "\"status\"", "]", "=", "\"all\"", "if", "self", ".", "links", ":", "kw", "[", "\"links\"", "]", "=", "\"all\"", "if", "self", ".", "include", "is", "not", "None", ":", "kw", "[", "\"include\"", "]", "=", "self", ".", "include", "if", "self", ".", "subject_key_type", "!=", "\"SubjectName\"", ":", "kw", "[", "\"subjectKeyType\"", "]", "=", "self", ".", "subject_key_type", "return", "kw" ]
Additional keyword arguments
[ "Additional", "keyword", "arguments" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/__init__.py#L478-L491
mdsol/rwslib
rwslib/rws_requests/__init__.py
ConfigurableDatasetRequest.dataset
def dataset(self): """ Qualify the dataset_name with the dataset_format if supplied :return: dataset name :rtype: str """ if self.dataset_format: return ".".join([self.dataset_name, self.dataset_format]) return self.dataset_name
python
def dataset(self): """ Qualify the dataset_name with the dataset_format if supplied :return: dataset name :rtype: str """ if self.dataset_format: return ".".join([self.dataset_name, self.dataset_format]) return self.dataset_name
[ "def", "dataset", "(", "self", ")", ":", "if", "self", ".", "dataset_format", ":", "return", "\".\"", ".", "join", "(", "[", "self", ".", "dataset_name", ",", "self", ".", "dataset_format", "]", ")", "return", "self", ".", "dataset_name" ]
Qualify the dataset_name with the dataset_format if supplied :return: dataset name :rtype: str
[ "Qualify", "the", "dataset_name", "with", "the", "dataset_format", "if", "supplied", ":", "return", ":", "dataset", "name", ":", "rtype", ":", "str" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/__init__.py#L797-L805
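Behavior of the property in isolation (a hypothetical minimal host class, not rwslib's actual ConfigurableDatasetRequest):

class DatasetNameOnly:  # hypothetical
    def __init__(self, dataset_name, dataset_format=None):
        self.dataset_name = dataset_name
        self.dataset_format = dataset_format

    @property
    def dataset(self):
        # Qualify the name with the format only when a format is supplied
        if self.dataset_format:
            return ".".join([self.dataset_name, self.dataset_format])
        return self.dataset_name

print(DatasetNameOnly("rwscmd_getdata", "odm").dataset)  # rwscmd_getdata.odm
print(DatasetNameOnly("rwscmd_getdata").dataset)         # rwscmd_getdata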
mdsol/rwslib
rwslib/rws_requests/biostats_gateway.py
check_dataset_format
def check_dataset_format(ds_format): """ Ensure dataset format is XML or CSV :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`) """ if ds_format.lower() not in DATASET_FORMATS.keys(): raise ValueError( "dataset_format is expected to be one of %s. '%s' is not valid" % (", ".join(DATASET_FORMATS.keys()), ds_format) )
python
def check_dataset_format(ds_format): """ Ensure dataset format is XML or CSV :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`) """ if ds_format.lower() not in DATASET_FORMATS.keys(): raise ValueError( "dataset_format is expected to be one of %s. '%s' is not valid" % (", ".join(DATASET_FORMATS.keys()), ds_format) )
[ "def", "check_dataset_format", "(", "ds_format", ")", ":", "if", "ds_format", ".", "lower", "(", ")", "not", "in", "DATASET_FORMATS", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"dataset_format is expected to be one of %s. '%s' is not valid\"", "%", "(", "\", \"", ".", "join", "(", "DATASET_FORMATS", ".", "keys", "(", ")", ")", ",", "ds_format", ")", ")" ]
Ensure dataset format is XML or CSV :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`)
[ "Ensure", "dataset", "format", "is", "XML", "or", "CSV" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/biostats_gateway.py#L16-L26
mdsol/rwslib
rwslib/rws_requests/biostats_gateway.py
dataset_format_to_extension
def dataset_format_to_extension(ds_format): """ Get the preferred Dataset format extension :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`) :rtype: str """ try: return DATASET_FORMATS[ds_format] except KeyError: raise ValueError( "dataset_format is expected to be one of %s. '%s' is not valid" % (", ".join(DATASET_FORMATS.keys()), ds_format) )
python
def dataset_format_to_extension(ds_format): """ Get the preferred Dataset format extension :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`) :rtype: str """ try: return DATASET_FORMATS[ds_format] except KeyError: raise ValueError( "dataset_format is expected to be one of %s. '%s' is not valid" % (", ".join(DATASET_FORMATS.keys()), ds_format) )
[ "def", "dataset_format_to_extension", "(", "ds_format", ")", ":", "try", ":", "return", "DATASET_FORMATS", "[", "ds_format", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"dataset_format is expected to be one of %s. '%s' is not valid\"", "%", "(", "\", \"", ".", "join", "(", "DATASET_FORMATS", ".", "keys", "(", ")", ")", ",", "ds_format", ")", ")" ]
Get the preferred Dataset format extension :param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`) :rtype: str
[ "Get", "the", "preferred", "Dataset", "format", "extension" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/biostats_gateway.py#L29-L42
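Both helpers lean on the DATASET_FORMATS mapping; a runnable sketch with an assumed mapping (the real constant lives in rwslib.rws_requests.biostats_gateway and its exact contents are not shown here):

DATASET_FORMATS = {"csv": "csv", "xml": "xml"}  # assumed shape

def dataset_format_to_extension(ds_format):
    try:
        return DATASET_FORMATS[ds_format]
    except KeyError:
        raise ValueError(
            "dataset_format is expected to be one of %s. '%s' is not valid"
            % (", ".join(DATASET_FORMATS.keys()), ds_format)
        )

print(dataset_format_to_extension("csv"))  # csv
try:
    dataset_format_to_extension("json")
except ValueError as e:
    print(e)  # dataset_format is expected to be one of csv, xml. 'json' is not valid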
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
get_data
def get_data(ctx, study, environment, subject): """ Call rwscmd_getdata custom dataset to retrieve currently enterable, empty fields """ cfg = GetDataConfigurableDataset(GET_DATA_DATASET, study, environment, subject, params=dict(IncludeIDs=0, IncludeValues=0)) # path = "datasets/{}?StudyOID={}&SubjectKey={}" \ # "&IncludeIDs=0&IncludeValues=0".format(GET_DATA_DATASET, studyoid, subject) # url = make_url(ctx.obj['RWS'].base_url, path) if ctx.obj['VERBOSE']: click.echo('Getting data list') # Get the client instance client = ctx.obj['RWS'] #: type: RWSConnection # Client rolls in the base_url resp = client.send_request(cfg) # resp = requests.get(url, auth=HTTPBasicAuth(ctx.obj['USERNAME'], ctx.obj['PASSWORD'])) if client.last_result.status_code != 200: click.echo(client.last_result.text) return xml_pretty_print(resp)
python
def get_data(ctx, study, environment, subject): """ Call rwscmd_getdata custom dataset to retrieve currently enterable, empty fields """ cfg = GetDataConfigurableDataset(GET_DATA_DATASET, study, environment, subject, params=dict(IncludeIDs=0, IncludeValues=0)) # path = "datasets/{}?StudyOID={}&SubjectKey={}" \ # "&IncludeIDs=0&IncludeValues=0".format(GET_DATA_DATASET, studyoid, subject) # url = make_url(ctx.obj['RWS'].base_url, path) if ctx.obj['VERBOSE']: click.echo('Getting data list') # Get the client instance client = ctx.obj['RWS'] #: type: RWSConnection # Client rolls in the base_url resp = client.send_request(cfg) # resp = requests.get(url, auth=HTTPBasicAuth(ctx.obj['USERNAME'], ctx.obj['PASSWORD'])) if client.last_result.status_code != 200: click.echo(client.last_result.text) return xml_pretty_print(resp)
[ "def", "get_data", "(", "ctx", ",", "study", ",", "environment", ",", "subject", ")", ":", "cfg", "=", "GetDataConfigurableDataset", "(", "GET_DATA_DATASET", ",", "study", ",", "environment", ",", "subject", ",", "params", "=", "dict", "(", "IncludeIDs", "=", "0", ",", "IncludeValues", "=", "0", ")", ")", "# path = \"datasets/{}?StudyOID={}&SubjectKey={}\" \\", "# \"&IncludeIDs=0&IncludeValues=0\".format(GET_DATA_DATASET, studyoid, subject)", "# url = make_url(ctx.obj['RWS'].base_url, path)", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Getting data list'", ")", "# Get the client instance", "client", "=", "ctx", ".", "obj", "[", "'RWS'", "]", "#: type: RWSConnection", "# Client rolls in the base_url", "resp", "=", "client", ".", "send_request", "(", "cfg", ")", "# resp = requests.get(url, auth=HTTPBasicAuth(ctx.obj['USERNAME'], ctx.obj['PASSWORD']))", "if", "client", ".", "last_result", ".", "status_code", "!=", "200", ":", "click", ".", "echo", "(", "client", ".", "last_result", ".", "text", ")", "return", "xml_pretty_print", "(", "resp", ")" ]
Call rwscmd_getdata custom dataset to retrieve currently enterable, empty fields
[ "Call", "rwscmd_getdata", "custom", "dataset", "to", "retrieve", "currently", "enterable", "empty", "fields" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L66-L91
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
rws_call
def rws_call(ctx, method, default_attr=None): """Make request to RWS""" try: response = ctx.obj['RWS'].send_request(method) if ctx.obj['RAW']: # use response from RWS result = ctx.obj['RWS'].last_result.text elif default_attr is not None: # human-readable summary result = "" for item in response: result = result + item.__dict__[default_attr] + "\n" else: # use response from RWS result = ctx.obj['RWS'].last_result.text if ctx.obj['OUTPUT']: # write to file ctx.obj['OUTPUT'].write(result.encode('utf-8')) else: # echo click.echo(result) except RWSException as e: click.echo(str(e))
python
def rws_call(ctx, method, default_attr=None): """Make request to RWS""" try: response = ctx.obj['RWS'].send_request(method) if ctx.obj['RAW']: # use response from RWS result = ctx.obj['RWS'].last_result.text elif default_attr is not None: # human-readable summary result = "" for item in response: result = result + item.__dict__[default_attr] + "\n" else: # use response from RWS result = ctx.obj['RWS'].last_result.text if ctx.obj['OUTPUT']: # write to file ctx.obj['OUTPUT'].write(result.encode('utf-8')) else: # echo click.echo(result) except RWSException as e: click.echo(str(e))
[ "def", "rws_call", "(", "ctx", ",", "method", ",", "default_attr", "=", "None", ")", ":", "try", ":", "response", "=", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "send_request", "(", "method", ")", "if", "ctx", ".", "obj", "[", "'RAW'", "]", ":", "# use response from RWS", "result", "=", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "last_result", ".", "text", "elif", "default_attr", "is", "not", "None", ":", "# human-readable summary", "result", "=", "\"\"", "for", "item", "in", "response", ":", "result", "=", "result", "+", "item", ".", "__dict__", "[", "default_attr", "]", "+", "\"\\n\"", "else", ":", "# use response from RWS", "result", "=", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "last_result", ".", "text", "if", "ctx", ".", "obj", "[", "'OUTPUT'", "]", ":", "# write to file", "ctx", ".", "obj", "[", "'OUTPUT'", "]", ".", "write", "(", "result", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "# echo", "click", ".", "echo", "(", "result", ")", "except", "RWSException", "as", "e", ":", "click", ".", "echo", "(", "str", "(", "e", ")", ")" ]
Make request to RWS
[ "Make", "request", "to", "RWS" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L94-L114
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
data
def data(ctx, path): """List EDC data for [STUDY] [ENV] [SUBJECT]""" _rws = partial(rws_call, ctx) if len(path) == 0: _rws(ClinicalStudiesRequest(), default_attr='oid') elif len(path) == 1: _rws(StudySubjectsRequest(path[0], 'Prod'), default_attr='subjectkey') elif len(path) == 2: _rws(StudySubjectsRequest(path[0], path[1]), default_attr='subjectkey') elif len(path) == 3: try: click.echo(get_data(ctx, path[0], path[1], path[2])) except RWSException as e: click.echo(str(e)) except requests.exceptions.HTTPError as e: click.echo(str(e)) else: click.echo('Too many arguments')
python
def data(ctx, path): """List EDC data for [STUDY] [ENV] [SUBJECT]""" _rws = partial(rws_call, ctx) if len(path) == 0: _rws(ClinicalStudiesRequest(), default_attr='oid') elif len(path) == 1: _rws(StudySubjectsRequest(path[0], 'Prod'), default_attr='subjectkey') elif len(path) == 2: _rws(StudySubjectsRequest(path[0], path[1]), default_attr='subjectkey') elif len(path) == 3: try: click.echo(get_data(ctx, path[0], path[1], path[2])) except RWSException as e: click.echo(str(e)) except requests.exceptions.HTTPError as e: click.echo(str(e)) else: click.echo('Too many arguments')
[ "def", "data", "(", "ctx", ",", "path", ")", ":", "_rws", "=", "partial", "(", "rws_call", ",", "ctx", ")", "if", "len", "(", "path", ")", "==", "0", ":", "_rws", "(", "ClinicalStudiesRequest", "(", ")", ",", "default_attr", "=", "'oid'", ")", "elif", "len", "(", "path", ")", "==", "1", ":", "_rws", "(", "StudySubjectsRequest", "(", "path", "[", "0", "]", ",", "'Prod'", ")", ",", "default_attr", "=", "'subjectkey'", ")", "elif", "len", "(", "path", ")", "==", "2", ":", "_rws", "(", "StudySubjectsRequest", "(", "path", "[", "0", "]", ",", "path", "[", "1", "]", ")", ",", "default_attr", "=", "'subjectkey'", ")", "elif", "len", "(", "path", ")", "==", "3", ":", "try", ":", "click", ".", "echo", "(", "get_data", "(", "ctx", ",", "path", "[", "0", "]", ",", "path", "[", "1", "]", ",", "path", "[", "2", "]", ")", ")", "except", "RWSException", "as", "e", ":", "click", ".", "echo", "(", "str", "(", "e", ")", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "click", ".", "echo", "(", "str", "(", "e", ")", ")", "else", ":", "click", ".", "echo", "(", "'Too many arguments'", ")" ]
List EDC data for [STUDY] [ENV] [SUBJECT]
[ "List", "EDC", "data", "for", "[", "STUDY", "]", "[", "ENV", "]", "[", "SUBJECT", "]" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L127-L144
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
post
def post(ctx, odm): """Post ODM clinical data""" try: ctx.obj['RWS'].send_request(PostDataRequest(odm.read())) if ctx.obj['RAW']: click.echo(ctx.obj['RWS'].last_result.text) except RWSException as e: click.echo(e.message)
python
def post(ctx, odm): """Post ODM clinical data""" try: ctx.obj['RWS'].send_request(PostDataRequest(odm.read())) if ctx.obj['RAW']: click.echo(ctx.obj['RWS'].last_result.text) except RWSException as e: click.echo(e.message)
[ "def", "post", "(", "ctx", ",", "odm", ")", ":", "try", ":", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "send_request", "(", "PostDataRequest", "(", "odm", ".", "read", "(", ")", ")", ")", "if", "ctx", ".", "obj", "[", "'RAW'", "]", ":", "click", ".", "echo", "(", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "last_result", ".", "text", ")", "except", "RWSException", "as", "e", ":", "click", ".", "echo", "(", "e", ".", "message", ")" ]
Post ODM clinical data
[ "Post", "ODM", "clinical", "data" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L150-L157
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
metadata
def metadata(ctx, drafts, path): """List metadata for [PROJECT] [VERSION]""" _rws = partial(rws_call, ctx) if len(path) == 0: _rws(MetadataStudiesRequest(), default_attr='oid') elif len(path) == 1: if drafts: _rws(StudyDraftsRequest(path[0]), default_attr='oid') else: _rws(StudyVersionsRequest(path[0]), default_attr='oid') elif len(path) == 2: _rws(StudyVersionRequest(path[0], path[1])) else: click.echo('Too many arguments')
python
def metadata(ctx, drafts, path): """List metadata for [PROJECT] [VERSION]""" _rws = partial(rws_call, ctx) if len(path) == 0: _rws(MetadataStudiesRequest(), default_attr='oid') elif len(path) == 1: if drafts: _rws(StudyDraftsRequest(path[0]), default_attr='oid') else: _rws(StudyVersionsRequest(path[0]), default_attr='oid') elif len(path) == 2: _rws(StudyVersionRequest(path[0], path[1])) else: click.echo('Too many arguments')
[ "def", "metadata", "(", "ctx", ",", "drafts", ",", "path", ")", ":", "_rws", "=", "partial", "(", "rws_call", ",", "ctx", ")", "if", "len", "(", "path", ")", "==", "0", ":", "_rws", "(", "MetadataStudiesRequest", "(", ")", ",", "default_attr", "=", "'oid'", ")", "elif", "len", "(", "path", ")", "==", "1", ":", "if", "drafts", ":", "_rws", "(", "StudyDraftsRequest", "(", "path", "[", "0", "]", ")", ",", "default_attr", "=", "'oid'", ")", "else", ":", "_rws", "(", "StudyVersionsRequest", "(", "path", "[", "0", "]", ")", ",", "default_attr", "=", "'oid'", ")", "elif", "len", "(", "path", ")", "==", "2", ":", "_rws", "(", "StudyVersionRequest", "(", "path", "[", "0", "]", ",", "path", "[", "1", "]", ")", ")", "else", ":", "click", ".", "echo", "(", "'Too many arguments'", ")" ]
List metadata for [PROJECT] [VERSION]
[ "List", "metadata", "for", "[", "PROJECT", "]", "[", "VERSION", "]" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L164-L177
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
direct
def direct(ctx, path): """Make direct call to RWS, bypassing rwslib""" try: url = make_url(ctx.obj['RWS'].base_url, path) resp = requests.get(url, auth=HTTPBasicAuth(ctx.obj['USERNAME'], ctx.obj['PASSWORD'])) click.echo(resp.text) except RWSException as e: click.echo(e.message) except requests.exceptions.HTTPError as e: click.echo(e.message)
python
def direct(ctx, path): """Make direct call to RWS, bypassing rwslib""" try: url = make_url(ctx.obj['RWS'].base_url, path) resp = requests.get(url, auth=HTTPBasicAuth(ctx.obj['USERNAME'], ctx.obj['PASSWORD'])) click.echo(resp.text) except RWSException as e: click.echo(e.message) except requests.exceptions.HTTPError as e: click.echo(e.message)
[ "def", "direct", "(", "ctx", ",", "path", ")", ":", "try", ":", "url", "=", "make_url", "(", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "base_url", ",", "path", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "HTTPBasicAuth", "(", "ctx", ".", "obj", "[", "'USERNAME'", "]", ",", "ctx", ".", "obj", "[", "'PASSWORD'", "]", ")", ")", "click", ".", "echo", "(", "resp", ".", "text", ")", "except", "RWSException", "as", "e", ":", "click", ".", "echo", "(", "e", ".", "message", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "click", ".", "echo", "(", "e", ".", "message", ")" ]
Make direct call to RWS, bypassing rwslib
[ "Make", "direct", "call", "to", "RWS", "bypassing", "rwslib" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L183-L192
mdsol/rwslib
rwslib/extras/rwscmd/rwscmd.py
autofill
def autofill(ctx, steps, metadata, fixed, study, environment, subject): """Request enterable data for a subject, generate data values and post back to Rave. Requires 'rwscmd_getdata' configurable dataset to be installed on the Rave URL.""" if metadata is not None: # Read metadata from file, if supplied odm_metadata = metadata.read() meta_v = etree.fromstring(odm_metadata).find('.//' + E_ODM.METADATA_VERSION.value).get(A_ODM.OID.value) else: odm_metadata = None meta_v = None fixed_values = {} if fixed is not None: # Read fixed values from file, if supplied for f in fixed: oid, value = f.decode().split(',') fixed_values[oid] = value if ctx.obj['VERBOSE']: click.echo('Fixing {} to value: {}'.format(oid, value)) try: for n in range(0, steps): if ctx.obj['VERBOSE']: click.echo('Step {}'.format(str(n + 1))) # Get currently enterable fields for this subject subject_data = get_data(ctx, study, environment, subject) subject_data_odm = etree.fromstring(subject_data) if subject_data_odm.find('.//' + E_ODM.CLINICAL_DATA.value) is None: if ctx.obj['VERBOSE']: click.echo('No data found') break # Get the metadata version for the subject subject_meta_v = subject_data_odm.find('.//' + E_ODM.CLINICAL_DATA.value).get( A_ODM.METADATA_VERSION_OID.value) if subject_meta_v is None: if ctx.obj['VERBOSE']: click.echo('Subject not found') break # If no metadata supplied, or versions don't match, retrieve metadata from RWS if meta_v != subject_meta_v: if ctx.obj['VERBOSE']: click.echo('Getting metadata version {}'.format(subject_meta_v)) ctx.obj['RWS'].send_request(StudyVersionRequest(study, subject_meta_v)) odm_metadata = ctx.obj['RWS'].last_result.text meta_v = subject_meta_v # Generate data values to fill in empty fields if ctx.obj['VERBOSE']: click.echo('Generating data') scr = Scramble(odm_metadata) odm = scr.fill_empty(fixed_values, subject_data) # If new data values, post to RWS if etree.fromstring(odm).find('.//' + E_ODM.ITEM_DATA.value) is None: if ctx.obj['VERBOSE']: click.echo('No data to send') break ctx.obj['RWS'].send_request(PostDataRequest(odm)) if ctx.obj['RAW']: click.echo(ctx.obj['RWS'].last_result.text) except RWSException as e: click.echo(e.rws_error) except requests.exceptions.HTTPError as e: click.echo(e.strerror)
python
def autofill(ctx, steps, metadata, fixed, study, environment, subject): """Request enterable data for a subject, generate data values and post back to Rave. Requires 'rwscmd_getdata' configurable dataset to be installed on the Rave URL.""" if metadata is not None: # Read metadata from file, if supplied odm_metadata = metadata.read() meta_v = etree.fromstring(odm_metadata).find('.//' + E_ODM.METADATA_VERSION.value).get(A_ODM.OID.value) else: odm_metadata = None meta_v = None fixed_values = {} if fixed is not None: # Read fixed values from file, if supplied for f in fixed: oid, value = f.decode().split(',') fixed_values[oid] = value if ctx.obj['VERBOSE']: click.echo('Fixing {} to value: {}'.format(oid, value)) try: for n in range(0, steps): if ctx.obj['VERBOSE']: click.echo('Step {}'.format(str(n + 1))) # Get currently enterable fields for this subject subject_data = get_data(ctx, study, environment, subject) subject_data_odm = etree.fromstring(subject_data) if subject_data_odm.find('.//' + E_ODM.CLINICAL_DATA.value) is None: if ctx.obj['VERBOSE']: click.echo('No data found') break # Get the metadata version for the subject subject_meta_v = subject_data_odm.find('.//' + E_ODM.CLINICAL_DATA.value).get( A_ODM.METADATA_VERSION_OID.value) if subject_meta_v is None: if ctx.obj['VERBOSE']: click.echo('Subject not found') break # If no metadata supplied, or versions don't match, retrieve metadata from RWS if meta_v != subject_meta_v: if ctx.obj['VERBOSE']: click.echo('Getting metadata version {}'.format(subject_meta_v)) ctx.obj['RWS'].send_request(StudyVersionRequest(study, subject_meta_v)) odm_metadata = ctx.obj['RWS'].last_result.text meta_v = subject_meta_v # Generate data values to fill in empty fields if ctx.obj['VERBOSE']: click.echo('Generating data') scr = Scramble(odm_metadata) odm = scr.fill_empty(fixed_values, subject_data) # If new data values, post to RWS if etree.fromstring(odm).find('.//' + E_ODM.ITEM_DATA.value) is None: if ctx.obj['VERBOSE']: click.echo('No data to send') break ctx.obj['RWS'].send_request(PostDataRequest(odm)) if ctx.obj['RAW']: click.echo(ctx.obj['RWS'].last_result.text) except RWSException as e: click.echo(e.rws_error) except requests.exceptions.HTTPError as e: click.echo(e.strerror)
[ "def", "autofill", "(", "ctx", ",", "steps", ",", "metadata", ",", "fixed", ",", "study", ",", "environment", ",", "subject", ")", ":", "if", "metadata", "is", "not", "None", ":", "# Read metadata from file, if supplied", "odm_metadata", "=", "metadata", ".", "read", "(", ")", "meta_v", "=", "etree", ".", "fromstring", "(", "odm_metadata", ")", ".", "find", "(", "'.//'", "+", "E_ODM", ".", "METADATA_VERSION", ".", "value", ")", ".", "get", "(", "A_ODM", ".", "OID", ".", "value", ")", "else", ":", "odm_metadata", "=", "None", "meta_v", "=", "None", "fixed_values", "=", "{", "}", "if", "fixed", "is", "not", "None", ":", "# Read fixed values from file, if supplied", "for", "f", "in", "fixed", ":", "oid", ",", "value", "=", "f", ".", "decode", "(", ")", ".", "split", "(", "','", ")", "fixed_values", "[", "oid", "]", "=", "value", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Fixing {} to value: {}'", ".", "format", "(", "oid", ",", "value", ")", ")", "try", ":", "for", "n", "in", "range", "(", "0", ",", "steps", ")", ":", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Step {}'", ".", "format", "(", "str", "(", "n", "+", "1", ")", ")", ")", "# Get currently enterable fields for this subject", "subject_data", "=", "get_data", "(", "ctx", ",", "study", ",", "environment", ",", "subject", ")", "subject_data_odm", "=", "etree", ".", "fromstring", "(", "subject_data", ")", "if", "subject_data_odm", ".", "find", "(", "'.//'", "+", "E_ODM", ".", "CLINICAL_DATA", ".", "value", ")", "is", "None", ":", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'No data found'", ")", "break", "# Get the metadata version for the subject", "subject_meta_v", "=", "subject_data_odm", ".", "find", "(", "'.//'", "+", "E_ODM", ".", "CLINICAL_DATA", ".", "value", ")", ".", "get", "(", "A_ODM", ".", "METADATA_VERSION_OID", ".", "value", ")", "if", "subject_meta_v", "is", "None", ":", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Subject not found'", ")", "break", "# If no metadata supplied, or versions don't match, retrieve metadata from RWS", "if", "meta_v", "!=", "subject_meta_v", ":", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Getting metadata version {}'", ".", "format", "(", "subject_meta_v", ")", ")", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "send_request", "(", "StudyVersionRequest", "(", "study", ",", "subject_meta_v", ")", ")", "odm_metadata", "=", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "last_result", ".", "text", "meta_v", "=", "subject_meta_v", "# Generate data values to fill in empty fields", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'Generating data'", ")", "scr", "=", "Scramble", "(", "odm_metadata", ")", "odm", "=", "scr", ".", "fill_empty", "(", "fixed_values", ",", "subject_data", ")", "# If new data values, post to RWS", "if", "etree", ".", "fromstring", "(", "odm", ")", ".", "find", "(", "'.//'", "+", "E_ODM", ".", "ITEM_DATA", ".", "value", ")", "is", "None", ":", "if", "ctx", ".", "obj", "[", "'VERBOSE'", "]", ":", "click", ".", "echo", "(", "'No data to send'", ")", "break", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "send_request", "(", "PostDataRequest", "(", "odm", ")", ")", "if", "ctx", ".", "obj", "[", "'RAW'", "]", ":", "click", ".", "echo", "(", "ctx", ".", "obj", "[", "'RWS'", "]", ".", "last_result", ".", "text", ")", "except", "RWSException", "as", "e", ":", "click", ".", 
"echo", "(", "e", ".", "rws_error", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "click", ".", "echo", "(", "e", ".", "strerror", ")" ]
Request enterable data for a subject, generate data values and post back to Rave. Requires 'rwscmd_getdata' configurable dataset to be installed on the Rave URL.
[ "Request", "enterable", "data", "for", "a", "subject", "generate", "data", "values", "and", "post", "back", "to", "Rave", ".", "Requires", "rwscmd_getdata", "configurable", "dataset", "to", "be", "installed", "on", "the", "Rave", "URL", "." ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/rwscmd.py#L205-L274
mdsol/rwslib
rwslib/builders/metadata.py
Study.build
def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid) params["mdsol:ProjectType"] = self.project_type builder.start("Study", params) # Ask children if self.global_variables is not None: self.global_variables.build(builder) if self.basic_definitions is not None: self.basic_definitions.build(builder) if self.metadata_version is not None: self.metadata_version.build(builder) builder.end("Study")
python
def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid) params["mdsol:ProjectType"] = self.project_type builder.start("Study", params) # Ask children if self.global_variables is not None: self.global_variables.build(builder) if self.basic_definitions is not None: self.basic_definitions.build(builder) if self.metadata_version is not None: self.metadata_version.build(builder) builder.end("Study")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "dict", "(", "OID", "=", "self", ".", "oid", ")", "params", "[", "\"mdsol:ProjectType\"", "]", "=", "self", ".", "project_type", "builder", ".", "start", "(", "\"Study\"", ",", "params", ")", "# Ask children", "if", "self", ".", "global_variables", "is", "not", "None", ":", "self", ".", "global_variables", ".", "build", "(", "builder", ")", "if", "self", ".", "basic_definitions", "is", "not", "None", ":", "self", ".", "basic_definitions", ".", "build", "(", "builder", ")", "if", "self", ".", "metadata_version", "is", "not", "None", ":", "self", ".", "metadata_version", ".", "build", "(", "builder", ")", "builder", ".", "end", "(", "\"Study\"", ")" ]
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L54-L71
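The builder object is driven like xml.etree.ElementTree's TreeBuilder; a minimal sketch of the same start/attributes/end dance with hypothetical values (whether rwslib actually passes a TreeBuilder here is an assumption of this sketch):

from xml.etree.ElementTree import TreeBuilder, tostring

builder = TreeBuilder()
builder.start("Study", {"OID": "Mediflex", "mdsol:ProjectType": "Project"})  # hypothetical OID
builder.start("GlobalVariables", {})
builder.end("GlobalVariables")
builder.end("Study")

print(tostring(builder.close()).decode())
# <Study OID="Mediflex" mdsol:ProjectType="Project"><GlobalVariables /></Study>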