Column                        Type            Min      Max
Unnamed: 0                    int64           0        10k
repository_name               stringlengths   7        54
func_path_in_repository       stringlengths   5        223
func_name                     stringlengths   1        134
whole_func_string             stringlengths   100      30.3k
language                      stringclasses   1 value
func_code_string              stringlengths   100      30.3k
func_code_tokens              stringlengths   138      33.2k
func_documentation_string     stringlengths   1        15k
func_documentation_tokens     stringlengths   5        5.14k
split_name                    stringclasses   1 value
func_code_url                 stringlengths   91       315
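A minimal sketch of how rows with this schema could be inspected. The file name "code_functions.csv" and the use of pandas are assumptions made for the example only; they are not part of the dataset itself.

# Sketch: inspecting rows that follow the column schema above.
# Assumption: the dump is available locally as "code_functions.csv".
import pandas as pd

df = pd.read_csv("code_functions.csv")

# The columns listed in the schema should all be present.
expected = [
    "repository_name", "func_path_in_repository", "func_name",
    "whole_func_string", "language", "func_code_string", "func_code_tokens",
    "func_documentation_string", "func_documentation_tokens",
    "split_name", "func_code_url",
]
print([c for c in expected if c not in df.columns])  # ideally prints []

# Per the schema, "language" and "split_name" each hold a single value.
print(df["language"].unique(), df["split_name"].unique())

# The quoted length ranges can be re-checked directly.
lengths = df["whole_func_string"].str.len()
print(lengths.min(), lengths.max())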
0
getsentry/libsourcemap
libsourcemap/highlevel.py
View.get_original_function_name
def get_original_function_name(self, line, col, minified_name, minified_source):
    """Given a token location and a minified function name and the
    minified source file this returns the original function name if it
    can be found of the minified function in scope.
    """
    # Silently ignore underflows
    if line < 0 or col < 0:
        return None

    minified_name = minified_name.encode('utf-8')
    sout = _ffi.new('const char **')
    try:
        slen = rustcall(_lib.lsm_view_get_original_function_name,
                        self._get_ptr(), line, col, minified_name,
                        minified_source, sout)
        if slen > 0:
            return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace')
    except SourceMapError:
        # In some rare cases the library is/was known to panic. We do
        # not want to report this upwards (this happens on slicing
        # out of range on older rust versions in the rust-sourcemap
        # library)
        pass
python
def get_original_function_name(self, line, col, minified_name, minified_source):
    """Given a token location and a minified function name and the
    minified source file this returns the original function name if it
    can be found of the minified function in scope.
    """
    # Silently ignore underflows
    if line < 0 or col < 0:
        return None

    minified_name = minified_name.encode('utf-8')
    sout = _ffi.new('const char **')
    try:
        slen = rustcall(_lib.lsm_view_get_original_function_name,
                        self._get_ptr(), line, col, minified_name,
                        minified_source, sout)
        if slen > 0:
            return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace')
    except SourceMapError:
        # In some rare cases the library is/was known to panic. We do
        # not want to report this upwards (this happens on slicing
        # out of range on older rust versions in the rust-sourcemap
        # library)
        pass
['def', 'get_original_function_name', '(', 'self', ',', 'line', ',', 'col', ',', 'minified_name', ',', 'minified_source', ')', ':', '# Silently ignore underflows', 'if', 'line', '<', '0', 'or', 'col', '<', '0', ':', 'return', 'None', 'minified_name', '=', 'minified_name', '.', 'encode', '(', "'utf-8'", ')', 'sout', '=', '_ffi', '.', 'new', '(', "'const char **'", ')', 'try', ':', 'slen', '=', 'rustcall', '(', '_lib', '.', 'lsm_view_get_original_function_name', ',', 'self', '.', '_get_ptr', '(', ')', ',', 'line', ',', 'col', ',', 'minified_name', ',', 'minified_source', ',', 'sout', ')', 'if', 'slen', '>', '0', ':', 'return', '_ffi', '.', 'unpack', '(', 'sout', '[', '0', ']', ',', 'slen', ')', '.', 'decode', '(', "'utf-8'", ',', "'replace'", ')', 'except', 'SourceMapError', ':', '# In some rare cases the library is/was known to panic. We do', '# not want to report this upwards (this happens on slicing', '# out of range on older rust versions in the rust-sourcemap', '# library)', 'pass']
Given a token location and a minified function name and the minified source file this returns the original function name if it can be found of the minified function in scope.
['Given', 'a', 'token', 'location', 'and', 'a', 'minified', 'function', 'name', 'and', 'the', 'minified', 'source', 'file', 'this', 'returns', 'the', 'original', 'function', 'name', 'if', 'it', 'can', 'be', 'found', 'of', 'the', 'minified', 'function', 'in', 'scope', '.']
train
https://github.com/getsentry/libsourcemap/blob/94b5a34814fafee9dc23da8ec0ccca77f30e3370/libsourcemap/highlevel.py#L163-L185
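As a usage illustration only: the wrapper below assumes a libsourcemap View instance is already available (this record does not show how one is constructed), and the coordinates and minified name passed to it are placeholders, not real values.

def resolve_name(view, minified_source, line, col, minified_name):
    # `view` is assumed to be an existing libsourcemap View instance.
    # The method above returns None both for negative line/col and when
    # no original name can be resolved, so a placeholder is substituted.
    name = view.get_original_function_name(line, col, minified_name, minified_source)
    return name if name is not None else "<unknown>"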
1
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py
brocade_mac_address_table.get_mac_address_table_input_request_type_get_interface_based_request_mac_type
def get_mac_address_table_input_request_type_get_interface_based_request_mac_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_mac_address_table = ET.Element("get_mac_address_table")
    config = get_mac_address_table
    input = ET.SubElement(get_mac_address_table, "input")
    request_type = ET.SubElement(input, "request-type")
    get_interface_based_request = ET.SubElement(request_type, "get-interface-based-request")
    mac_type = ET.SubElement(get_interface_based_request, "mac-type")
    mac_type.text = kwargs.pop('mac_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def get_mac_address_table_input_request_type_get_interface_based_request_mac_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_mac_address_table = ET.Element("get_mac_address_table")
    config = get_mac_address_table
    input = ET.SubElement(get_mac_address_table, "input")
    request_type = ET.SubElement(input, "request-type")
    get_interface_based_request = ET.SubElement(request_type, "get-interface-based-request")
    mac_type = ET.SubElement(get_interface_based_request, "mac-type")
    mac_type.text = kwargs.pop('mac_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'get_mac_address_table_input_request_type_get_interface_based_request_mac_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_mac_address_table', '=', 'ET', '.', 'Element', '(', '"get_mac_address_table"', ')', 'config', '=', 'get_mac_address_table', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_mac_address_table', ',', '"input"', ')', 'request_type', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"request-type"', ')', 'get_interface_based_request', '=', 'ET', '.', 'SubElement', '(', 'request_type', ',', '"get-interface-based-request"', ')', 'mac_type', '=', 'ET', '.', 'SubElement', '(', 'get_interface_based_request', ',', '"mac-type"', ')', 'mac_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'mac_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py#L297-L310
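For orientation, a runnable sketch of the same element nesting the generated method builds. It assumes ET refers to xml.etree.ElementTree (the standard-library module), and "static" is a made-up stand-in for the mac_type argument.

import xml.etree.ElementTree as ET

# Reproduce the hierarchy built by the generated method, outside the class.
get_mac_address_table = ET.Element("get_mac_address_table")
input_el = ET.SubElement(get_mac_address_table, "input")
request_type = ET.SubElement(input_el, "request-type")
get_interface_based_request = ET.SubElement(request_type, "get-interface-based-request")
mac_type = ET.SubElement(get_interface_based_request, "mac-type")
mac_type.text = "static"

# The serialised form shows the nesting the RPC callback would receive.
print(ET.tostring(get_mac_address_table).decode())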
2
nicolargo/glances
glances/plugins/glances_memswap.py
Plugin.update_views
def update_views(self):
    """Update stats views."""
    # Call the father's method
    super(Plugin, self).update_views()

    # Add specifics informations
    # Alert and log
    self.views['used']['decoration'] = self.get_alert_log(self.stats['used'],
                                                          maximum=self.stats['total'])
python
def update_views(self):
    """Update stats views."""
    # Call the father's method
    super(Plugin, self).update_views()

    # Add specifics informations
    # Alert and log
    self.views['used']['decoration'] = self.get_alert_log(self.stats['used'],
                                                          maximum=self.stats['total'])
['def', 'update_views', '(', 'self', ')', ':', "# Call the father's method", 'super', '(', 'Plugin', ',', 'self', ')', '.', 'update_views', '(', ')', '# Add specifics informations', '# Alert and log', 'self', '.', 'views', '[', "'used'", ']', '[', "'decoration'", ']', '=', 'self', '.', 'get_alert_log', '(', 'self', '.', 'stats', '[', "'used'", ']', ',', 'maximum', '=', 'self', '.', 'stats', '[', "'total'", ']', ')']
Update stats views.
['Update', 'stats', 'views', '.']
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_memswap.py#L130-L137
3
DLR-RM/RAFCON
source/rafcon/gui/helpers/meta_data.py
contains_geometric_info
def contains_geometric_info(var):
    """ Check whether the passed variable is a tuple with two floats or integers """
    return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var)
python
def contains_geometric_info(var):
    """ Check whether the passed variable is a tuple with two floats or integers """
    return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var)
['def', 'contains_geometric_info', '(', 'var', ')', ':', 'return', 'isinstance', '(', 'var', ',', 'tuple', ')', 'and', 'len', '(', 'var', ')', '==', '2', 'and', 'all', '(', 'isinstance', '(', 'val', ',', '(', 'int', ',', 'float', ')', ')', 'for', 'val', 'in', 'var', ')']
Check whether the passed variable is a tuple with two floats or integers
['Check', 'whether', 'the', 'passed', 'variable', 'is', 'a', 'tuple', 'with', 'two', 'floats', 'or', 'integers']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/meta_data.py#L55-L57
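Since the helper is a pure predicate, a self-contained check is easy; the function body is copied from the record so the example runs on its own, and the sample inputs are invented.

def contains_geometric_info(var):
    """ Check whether the passed variable is a tuple with two floats or integers """
    return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var)

print(contains_geometric_info((3, 4.5)))    # True: 2-tuple of numbers
print(contains_geometric_info([3, 4.5]))    # False: list, not tuple
print(contains_geometric_info((3,)))        # False: wrong length
print(contains_geometric_info(("3", 4.5)))  # False: non-numeric element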
4
ejeschke/ginga
ginga/gtk3w/ImageViewGtk.py
ImageViewGtk.save_plain_image_as_file
def save_plain_image_as_file(self, filepath, format='png', quality=90):
    """Used for generating thumbnails.  Does not include overlaid graphics.
    """
    pixbuf = self.get_plain_image_as_pixbuf()
    options, values = [], []
    if format == 'jpeg':
        options.append('quality')
        values.append(str(quality))
    pixbuf.savev(filepath, format, options, values)
python
def save_plain_image_as_file(self, filepath, format='png', quality=90):
    """Used for generating thumbnails.  Does not include overlaid graphics.
    """
    pixbuf = self.get_plain_image_as_pixbuf()
    options, values = [], []
    if format == 'jpeg':
        options.append('quality')
        values.append(str(quality))
    pixbuf.savev(filepath, format, options, values)
['def', 'save_plain_image_as_file', '(', 'self', ',', 'filepath', ',', 'format', '=', "'png'", ',', 'quality', '=', '90', ')', ':', 'pixbuf', '=', 'self', '.', 'get_plain_image_as_pixbuf', '(', ')', 'options', ',', 'values', '=', '[', ']', ',', '[', ']', 'if', 'format', '==', "'jpeg'", ':', 'options', '.', 'append', '(', "'quality'", ')', 'values', '.', 'append', '(', 'str', '(', 'quality', ')', ')', 'pixbuf', '.', 'savev', '(', 'filepath', ',', 'format', ',', 'options', ',', 'values', ')']
Used for generating thumbnails. Does not include overlaid graphics.
['Used', 'for', 'generating', 'thumbnails', '.', 'Does', 'not', 'include', 'overlaid', 'graphics', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/gtk3w/ImageViewGtk.py#L75-L84
5
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
simxClearFloatSignal
def simxClearFloatSignal(clientID, signalName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    if (sys.version_info[0] == 3) and (type(signalName) is str):
        signalName = signalName.encode('utf-8')
    return c_ClearFloatSignal(clientID, signalName, operationMode)
python
def simxClearFloatSignal(clientID, signalName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    if (sys.version_info[0] == 3) and (type(signalName) is str):
        signalName = signalName.encode('utf-8')
    return c_ClearFloatSignal(clientID, signalName, operationMode)
['def', 'simxClearFloatSignal', '(', 'clientID', ',', 'signalName', ',', 'operationMode', ')', ':', 'if', '(', 'sys', '.', 'version_info', '[', '0', ']', '==', '3', ')', 'and', '(', 'type', '(', 'signalName', ')', 'is', 'str', ')', ':', 'signalName', '=', 'signalName', '.', 'encode', '(', "'utf-8'", ')', 'return', 'c_ClearFloatSignal', '(', 'clientID', ',', 'signalName', ',', 'operationMode', ')']
Please have a look at the function description/documentation in the V-REP user manual
['Please', 'have', 'a', 'look', 'at', 'the', 'function', 'description', '/', 'documentation', 'in', 'the', 'V', '-', 'REP', 'user', 'manual']
train
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L900-L907
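The only logic in this binding besides the FFI call is the Python 3 str-to-bytes guard. The helper below is a hypothetical extraction of that guard written for illustration; it is not part of the pypot bindings.

import sys

def _as_signal_name(signalName):
    # Mirrors the guard in simxClearFloatSignal: on Python 3 a str signal
    # name is encoded to UTF-8 bytes before being handed to the C binding.
    if (sys.version_info[0] == 3) and (type(signalName) is str):
        signalName = signalName.encode('utf-8')
    return signalName

print(_as_signal_name("gripper_force"))    # b'gripper_force' on Python 3
print(_as_signal_name(b"gripper_force"))   # already bytes, passed through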
6
cloudify-cosmo/repex
repex.py
Repex.find_matches
def find_matches(self, content, file_to_handle):
    """Find all matches of an expression in a file
    """
    # look for all match groups in the content
    groups = [match.groupdict() for match in
              self.match_expression.finditer(content)]
    # filter out content not in the matchgroup
    matches = [group['matchgroup'] for group in groups
               if group.get('matchgroup')]

    logger.info('Found %s matches in %s', len(matches), file_to_handle)
    # We only need the unique strings found as we'll be replacing each
    # of them. No need to replace the ones already replaced.
    return list(set(matches))
python
def find_matches(self, content, file_to_handle):
    """Find all matches of an expression in a file
    """
    # look for all match groups in the content
    groups = [match.groupdict() for match in
              self.match_expression.finditer(content)]
    # filter out content not in the matchgroup
    matches = [group['matchgroup'] for group in groups
               if group.get('matchgroup')]

    logger.info('Found %s matches in %s', len(matches), file_to_handle)
    # We only need the unique strings found as we'll be replacing each
    # of them. No need to replace the ones already replaced.
    return list(set(matches))
['def', 'find_matches', '(', 'self', ',', 'content', ',', 'file_to_handle', ')', ':', '# look for all match groups in the content', 'groups', '=', '[', 'match', '.', 'groupdict', '(', ')', 'for', 'match', 'in', 'self', '.', 'match_expression', '.', 'finditer', '(', 'content', ')', ']', '# filter out content not in the matchgroup', 'matches', '=', '[', 'group', '[', "'matchgroup'", ']', 'for', 'group', 'in', 'groups', 'if', 'group', '.', 'get', '(', "'matchgroup'", ')', ']', 'logger', '.', 'info', '(', "'Found %s matches in %s'", ',', 'len', '(', 'matches', ')', ',', 'file_to_handle', ')', "# We only need the unique strings found as we'll be replacing each", '# of them. No need to replace the ones already replaced.', 'return', 'list', '(', 'set', '(', 'matches', ')', ')']
Find all matches of an expression in a file
['Find', 'all', 'matches', 'of', 'an', 'expression', 'in', 'a', 'file']
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L605-L618
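find_matches relies on self.match_expression being a compiled regex with a named group called "matchgroup". The pattern and sample content below are invented for the sketch to show that contract end to end; only the named group matters.

import re

# Invented pattern and content; the named group "matchgroup" carries the capture.
match_expression = re.compile(r"version: (?P<matchgroup>\d+\.\d+\.\d+)")
content = "version: 1.2.3\nother: x\nversion: 1.2.3\nversion: 2.0.0\n"

groups = [m.groupdict() for m in match_expression.finditer(content)]
matches = [g['matchgroup'] for g in groups if g.get('matchgroup')]

# Duplicates collapse, matching the list(set(...)) at the end of find_matches.
print(sorted(set(matches)))  # ['1.2.3', '2.0.0']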
7
mariocj89/github-token
github_token/__init__.py
TokenFactory.create
def create(self):
    """Creates a token

    It uses the app_name as the notes and the scopes are the permissions
    required by the application. See those in github when configuring an
    app token

    Raises a TFARequired if a two factor is required after the atempt to
    create it without having call tfa before
    """
    headers = dict()
    if self.tfa_token:
        headers["X-GitHub-OTP"] = self.tfa_token
    token_name = self.app_name + platform.node()  # node specific in case the user has multiple hosts
    payload = dict(note=token_name, scopes=self.scopes)
    response = requests.post(
        self.api_url + "authorizations",
        auth=(self.user, self.password),
        headers=headers,
        json=payload
    )
    if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""):
        raise TFARequired("TFA required for the user")
    if response.status_code == 422:
        raise AlreadyExistsError("APP already exists. Please delete {} token".format(token_name))
    if response.status_code == 401:
        raise BadPassword("Bad User/Password")
    response.raise_for_status()
    return response.json()["token"]
python
def create(self):
    """Creates a token

    It uses the app_name as the notes and the scopes are the permissions
    required by the application. See those in github when configuring an
    app token

    Raises a TFARequired if a two factor is required after the atempt to
    create it without having call tfa before
    """
    headers = dict()
    if self.tfa_token:
        headers["X-GitHub-OTP"] = self.tfa_token
    token_name = self.app_name + platform.node()  # node specific in case the user has multiple hosts
    payload = dict(note=token_name, scopes=self.scopes)
    response = requests.post(
        self.api_url + "authorizations",
        auth=(self.user, self.password),
        headers=headers,
        json=payload
    )
    if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""):
        raise TFARequired("TFA required for the user")
    if response.status_code == 422:
        raise AlreadyExistsError("APP already exists. Please delete {} token".format(token_name))
    if response.status_code == 401:
        raise BadPassword("Bad User/Password")
    response.raise_for_status()
    return response.json()["token"]
['def', 'create', '(', 'self', ')', ':', 'headers', '=', 'dict', '(', ')', 'if', 'self', '.', 'tfa_token', ':', 'headers', '[', '"X-GitHub-OTP"', ']', '=', 'self', '.', 'tfa_token', 'token_name', '=', 'self', '.', 'app_name', '+', 'platform', '.', 'node', '(', ')', '# node specific in case the user has multiple hosts', 'payload', '=', 'dict', '(', 'note', '=', 'token_name', ',', 'scopes', '=', 'self', '.', 'scopes', ')', 'response', '=', 'requests', '.', 'post', '(', 'self', '.', 'api_url', '+', '"authorizations"', ',', 'auth', '=', '(', 'self', '.', 'user', ',', 'self', '.', 'password', ')', ',', 'headers', '=', 'headers', ',', 'json', '=', 'payload', ')', 'if', 'response', '.', 'status_code', '==', '401', 'and', '"required"', 'in', 'response', '.', 'headers', '.', 'get', '(', '"X-GitHub-OTP"', ',', '""', ')', ':', 'raise', 'TFARequired', '(', '"TFA required for the user"', ')', 'if', 'response', '.', 'status_code', '==', '422', ':', 'raise', 'AlreadyExistsError', '(', '"APP already exists. Please delete {} token"', '.', 'format', '(', 'token_name', ')', ')', 'if', 'response', '.', 'status_code', '==', '401', ':', 'raise', 'BadPassword', '(', '"Bad User/Password"', ')', 'response', '.', 'raise_for_status', '(', ')', 'return', 'response', '.', 'json', '(', ')', '[', '"token"', ']']
Creates a token It uses the app_name as the notes and the scopes are the permissions required by the application. See those in github when configuring an app token Raises a TFARequired if a two factor is required after the atempt to create it without having call tfa before
['Creates', 'a', 'token']
train
https://github.com/mariocj89/github-token/blob/8ca85fa51a52aef94cfb4f851eb229ee500bc28f/github_token/__init__.py#L81-L108
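The interesting part of create() is the order of the status-code checks. The sketch below replays that dispatch with local stand-ins; FakeResponse, classify, and the exception classes are defined here only for the example and are not objects of the github_token package.

class TFARequired(Exception): pass
class AlreadyExistsError(Exception): pass
class BadPassword(Exception): pass

class FakeResponse:
    # Minimal stand-in for requests.Response, just enough for the dispatch.
    def __init__(self, status_code, headers=None):
        self.status_code = status_code
        self.headers = headers or {}

def classify(response):
    # Same branch order as create(): the X-GitHub-OTP header is inspected
    # before the generic 401, so a missing second factor is not reported
    # as a bad password.
    if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""):
        raise TFARequired("TFA required for the user")
    if response.status_code == 422:
        raise AlreadyExistsError("token with this note already exists")
    if response.status_code == 401:
        raise BadPassword("Bad User/Password")

try:
    classify(FakeResponse(401, {"X-GitHub-OTP": "required; sms"}))
except TFARequired as exc:
    print("caught:", exc)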
8
brycedrennan/eulerian-magnification
eulerian_magnification/base.py
combine_pyramid_and_save
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'):
    """Combine a gaussian video representation with the original and save to file"""
    width, height = get_frame_dimensions(orig_video[0])
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

    print("Outputting to %s" % save_filename)
    writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
    for x in range(0, g_video.shape[0]):
        img = np.ndarray(shape=g_video[x].shape, dtype='float')
        img[:] = g_video[x]
        for i in range(enlarge_multiple):
            img = cv2.pyrUp(img)

        img[:height, :width] = img[:height, :width] + orig_video[x]
        res = cv2.convertScaleAbs(img[:height, :width])
        writer.write(res)
python
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'):
    """Combine a gaussian video representation with the original and save to file"""
    width, height = get_frame_dimensions(orig_video[0])
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

    print("Outputting to %s" % save_filename)
    writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
    for x in range(0, g_video.shape[0]):
        img = np.ndarray(shape=g_video[x].shape, dtype='float')
        img[:] = g_video[x]
        for i in range(enlarge_multiple):
            img = cv2.pyrUp(img)

        img[:height, :width] = img[:height, :width] + orig_video[x]
        res = cv2.convertScaleAbs(img[:height, :width])
        writer.write(res)
['def', 'combine_pyramid_and_save', '(', 'g_video', ',', 'orig_video', ',', 'enlarge_multiple', ',', 'fps', ',', 'save_filename', '=', "'media/output.avi'", ')', ':', 'width', ',', 'height', '=', 'get_frame_dimensions', '(', 'orig_video', '[', '0', ']', ')', 'fourcc', '=', 'cv2', '.', 'VideoWriter_fourcc', '(', '*', "'MJPG'", ')', 'print', '(', '"Outputting to %s"', '%', 'save_filename', ')', 'writer', '=', 'cv2', '.', 'VideoWriter', '(', 'save_filename', ',', 'fourcc', ',', 'fps', ',', '(', 'width', ',', 'height', ')', ',', '1', ')', 'for', 'x', 'in', 'range', '(', '0', ',', 'g_video', '.', 'shape', '[', '0', ']', ')', ':', 'img', '=', 'np', '.', 'ndarray', '(', 'shape', '=', 'g_video', '[', 'x', ']', '.', 'shape', ',', 'dtype', '=', "'float'", ')', 'img', '[', ':', ']', '=', 'g_video', '[', 'x', ']', 'for', 'i', 'in', 'range', '(', 'enlarge_multiple', ')', ':', 'img', '=', 'cv2', '.', 'pyrUp', '(', 'img', ')', 'img', '[', ':', 'height', ',', ':', 'width', ']', '=', 'img', '[', ':', 'height', ',', ':', 'width', ']', '+', 'orig_video', '[', 'x', ']', 'res', '=', 'cv2', '.', 'convertScaleAbs', '(', 'img', '[', ':', 'height', ',', ':', 'width', ']', ')', 'writer', '.', 'write', '(', 'res', ')']
Combine a gaussian video representation with the original and save to file
['Combine', 'a', 'gaussian', 'video', 'representation', 'with', 'the', 'original', 'and', 'save', 'to', 'file']
train
https://github.com/brycedrennan/eulerian-magnification/blob/9ae0651fe3334176300d183f8240ad36d77759a9/eulerian_magnification/base.py#L108-L122
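To see why the loop over enlarge_multiple exists, note that each cv2.pyrUp call roughly doubles both spatial dimensions, bringing a downsampled pyramid level back toward the original frame size before it is added to the source frame. The stand-in frame and sizes below are made up for the check (requires numpy and opencv-python).

import numpy as np
import cv2

frame = np.random.rand(15, 20).astype(np.float32)  # stand-in pyramid level
enlarge_multiple = 2

img = frame
for _ in range(enlarge_multiple):
    img = cv2.pyrUp(img)  # each call doubles width and height

print(frame.shape, "->", img.shape)  # (15, 20) -> (60, 80)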
9
PmagPy/PmagPy
pmagpy/pmag.py
PintPars
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs): """ calculate the paleointensity magic parameters make some definitions """ if 'version' in list(kwargs.keys()) and kwargs['version'] == 3: meth_key = 'method_codes' beta_key = 'int_b_beta' temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max' dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi' # convert dataframe to list of dictionaries datablock = datablock.to_dict('records') z_key = 'int_z' drats_key = 'int_drats' drat_key = 'int_drat' md_key = 'int_md' dec_key = 'dir_dec' inc_key = 'dir_inc' mad_key = 'int_mad_free' dang_key = 'int_dang' ptrm_key = 'int_n_ptrm' theta_key = 'int_theta' gamma_key = 'int_gamma' delta_key = 'int_delta' frac_key = 'int_frac' gmax_key = 'int_gmax' scat_key = 'int_scat' else: beta_key = 'specimen_b_beta' meth_key = 'magic_method_codes' temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max' z_key = 'specimen_z' drats_key = 'specimen_drats' drat_key = 'specimen_drat' md_key = 'specimen_md' dec_key = 'specimen_dec' inc_key = 'specimen_inc' mad_key = 'specimen_int_mad' dang_key = 'specimen_dang' ptrm_key = 'specimen_int_ptrm_n' theta_key = 'specimen_theta' gamma_key = 'specimen_gamma' delta_key = 'specimen_delta' frac_key = 'specimen_frac' gmax_key = 'specimen_gmax' scat_key = 'specimen_scat' first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], [] methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", "" zptrm_check = [] first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[ 0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5] if len(araiblock) > 6: # used only for perpendicular method of paleointensity ThetaChecks = araiblock[6] # used only for perpendicular method of paleointensity DeltaChecks = araiblock[7] xi, yi, diffcum = [], [], 0 xiz, xzi, yiz, yzi = [], [], [], [] Nptrm, dmax = 0, -1e-22 # check if even zero and infield steps if len(first_Z) > len(first_I): maxe = len(first_I) - 1 else: maxe = len(first_Z) - 1 if end == 0 or end > maxe: end = maxe # get the MAD, DANG, etc. for directional data bstep = araiblock[0][start][0] estep = araiblock[0][end][0] zstart, zend = 0, len(zijdblock) for k in range(len(zijdblock)): zrec = zijdblock[k] if zrec[0] == bstep: zstart = k if zrec[0] == estep: zend = k PCA = domean(zijdblock, zstart, zend, 'DE-BFL') D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz for rec in zijdblock: D.append((rec[1], rec[2], rec[3])) Du.append((rec[1], rec[2])) if rec[4] == 1: Dzi.append((rec[1], rec[2])) # if this is ZI step else: Diz.append((rec[1], rec[2])) # if this is IZ step # calculate the vector difference sum vds = dovds(D) b_zi, b_iz = [], [] # collect data included in ZigZag calculation if end + 1 >= len(first_Z): stop = end - 1 else: stop = end for k in range(start, end + 1): for l in range(len(first_I)): irec = first_I[l] if irec[0] == first_Z[k][0]: xi.append(irec[3]) yi.append(first_Z[k][3]) pars, errcode = int_pars(xi, yi, vds) if errcode == 1: return pars, errcode # for k in range(start,end+1): for k in range(len(first_Z) - 1): for l in range(k): # only go down to 10% of NRM..... 
if old_div(first_Z[k][3], vds) > 0.1: irec = first_I[l] if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step xzi = irec[3] yzi = first_Z[k][3] xiz = first_I[l + 1][3] yiz = first_Z[k + 1][3] slope = np.arctan2((yzi - yiz), (xiz - xzi)) r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2) if r > .1 * vds: b_zi.append(slope) # suppress noise elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step xiz = irec[3] yiz = first_Z[k][3] xzi = first_I[l + 1][3] yzi = first_Z[k + 1][3] slope = np.arctan2((yiz - yzi), (xzi - xiz)) r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2) if r > .1 * vds: b_iz.append(slope) # suppress noise # ZigZag, Frat, Trat = -1, 0, 0 if len(Diz) > 2 and len(Dzi) > 2: ZigZag = 0 dizp = fisher_mean(Diz) # get Fisher stats on IZ steps dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps dup = fisher_mean(Du) # get Fisher stats on all steps # # if directions are TOO well grouped, can get false positive for ftest, so # angles must be > 3 degrees apart. # if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.: F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \ (dup['n'] - dzip['r'] - dizp['r'] ) # Watson test for common mean nf = 2. * (dup['n'] - 2.) # number of degees of freedom ftest = fcalc(2, nf) Frat = old_div(F, ftest) if Frat > 1.: ZigZag = Frat # fails zigzag on directions methcode = "SM-FTEST" # now do slopes if len(b_zi) > 2 and len(b_iz) > 2: bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev biz_m, biz_sig = gausspars(b_iz) n_zi = float(len(b_zi)) n_iz = float(len(b_iz)) b_diff = abs(bzi_m - biz_m) # difference in means # # avoid false positives - set 3 degree slope difference here too if b_diff > 3 * np.pi / 180.: nf = n_zi + n_iz - 2. # degrees of freedom svar = old_div(((n_zi - 1.) * bzi_sig**2 + (n_iz - 1.) * biz_sig**2), nf) T = old_div((b_diff), np.sqrt( svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t ttest = tcalc(nf, .05) # t-test at 95% conf. 
Trat = old_div(T, ttest) if Trat > 1 and Trat > Frat: ZigZag = Trat # fails zigzag on directions methcode = "SM-TTEST" pars[z_key] = ZigZag pars[meth_key] = methcode # do drats if len(ptrm_check) != 0: diffcum, drat_max = 0, 0 for prec in ptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 2 # don't count alteration that happens after this step if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] if abs(prec[3] - irec[3]) > drat_max: drat_max = abs(prec[3] - irec[3]) pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3]) elif len(zptrm_check) != 0: diffcum = 0 for prec in zptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 1 if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) else: pars[drats_key] = -1 pars[drat_key] = -1 # and the pTRM tails if len(ptrm_tail) != 0: for trec in ptrm_tail: step = trec[0] for irec in first_I: if irec[0] == step: break if abs(trec[3]) > dmax: dmax = abs(trec[3]) pars[md_key] = (100 * dmax / vds) else: pars[md_key] = -1 pars[min_key] = bstep pars[max_key] = estep pars[dec_key] = PCA["specimen_dec"] pars[inc_key] = PCA["specimen_inc"] pars[mad_key] = PCA["specimen_mad"] pars[dang_key] = PCA["specimen_dang"] pars[ptrm_key] = Nptrm # and the ThetaChecks if ThetaChecks != "": t = 0 for theta in ThetaChecks: if theta[0] >= bstep and theta[0] <= estep and theta[1] > t: t = theta[1] pars[theta_key] = t else: pars[theta_key] = -1 # and the DeltaChecks if DeltaChecks != "": d = 0 for delta in DeltaChecks: if delta[0] >= bstep and delta[0] <= estep and delta[1] > d: d = delta[1] pars[delta_key] else: pars[delta_key] = -1 pars[gamma_key] = -1 if GammaChecks != "": for gamma in GammaChecks: if gamma[0] <= estep: pars['specimen_gamma'] = gamma[1] # -------------------------------------------------------------- # From here added By Ron Shaar 11-Dec 2012 # New parameters defined in Shaar and Tauxe (2012): # FRAC (specimen_frac) - ranges from 0. to 1. # SCAT (specimen_scat) - takes 1/0 # gap_max (specimen_gmax) - ranges from 0. to 1. # -------------------------------------------------------------- # -------------------------------------------------------------- # FRAC is similar to Fvds, but the numerator is the vds fraction: # FRAC= [ vds (start,end)] / total vds ] # gap_max= max [ (vector difference) / vds (start,end)] # -------------------------------------------------------------- # collect all zijderveld data to arrays and calculate VDS z_temperatures = [row[0] for row in zijdblock] zdata = [] # array of zero-fields measurements in Cartezian coordinates # array of vector differences (for vds calculation) vector_diffs = [] NRM = zijdblock[0][3] # NRM for k in range(len(zijdblock)): DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)] cart = dir2cart(DIR) zdata.append(np.array([cart[0], cart[1], cart[2]])) if k > 0: vector_diffs.append( np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2))) # last vector difference: from the last point to the origin. 
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2))) vds = sum(vector_diffs) # vds calculation zdata = np.array(zdata) vector_diffs = np.array(vector_diffs) # calculate the vds within the chosen segment vector_diffs_segment = vector_diffs[zstart:zend] # FRAC calculation FRAC = old_div(sum(vector_diffs_segment), vds) pars[frac_key] = FRAC # gap_max calculation max_FRAC_gap = max( old_div(vector_diffs_segment, sum(vector_diffs_segment))) pars[gmax_key] = max_FRAC_gap # --------------------------------------------------------------------- # Calculate the "scat box" # all data-points, pTRM checks, and tail-checks, should be inside a "scat box" # --------------------------------------------------------------------- # intialization # fail scat due to arai plot data points pars["fail_arai_beta_box_scatter"] = False pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks pars[scat_key] = "t" # Pass by default # -------------------------------------------------------------- # collect all Arai plot data points in arrays x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], [] NRMs = araiblock[0] PTRMs = araiblock[1] ptrm_checks = araiblock[2] ptrm_tail = araiblock[3] PTRMs_temperatures = [row[0] for row in PTRMs] NRMs_temperatures = [row[0] for row in NRMs] NRM = NRMs[0][3] for k in range(len(NRMs)): index_pTRMs = PTRMs_temperatures.index(NRMs[k][0]) x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM)) y_Arai.append(old_div(NRMs[k][3], NRM)) t_Arai.append(NRMs[k][0]) if NRMs[k][4] == 1: steps_Arai.append('ZI') else: steps_Arai.append('IZ') x_Arai = np.array(x_Arai) y_Arai = np.array(y_Arai) # -------------------------------------------------------------- # collect all pTRM check to arrays x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], [] x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_checks)): if ptrm_checks[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_ptrm_check_starting_point.append(x_Arai[index]) y_ptrm_check_starting_point.append(y_Arai[index]) ptrm_checks_starting_temperatures.append( starting_temperature) index_zerofield = zerofield_temperatures.index( ptrm_checks[k][0]) x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM)) y_ptrm_check.append( old_div(zerofields[index_zerofield][3], NRM)) ptrm_checks_temperatures.append(ptrm_checks[k][0]) break except: pass x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point) y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point) ptrm_checks_starting_temperatures = np.array( ptrm_checks_starting_temperatures) x_ptrm_check = np.array(x_ptrm_check) y_ptrm_check = np.array(y_ptrm_check) ptrm_checks_temperatures = np.array(ptrm_checks_temperatures) # -------------------------------------------------------------- # collect tail checks to arrays x_tail_check, y_tail_check, tail_check_temperatures = [], [], [] x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_tail)): if ptrm_tail[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if 
"LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_tail_check_starting_point.append(x_Arai[index]) y_tail_check_starting_point.append(y_Arai[index]) tail_checks_starting_temperatures.append( starting_temperature) index_infield = infield_temperatures.index( ptrm_tail[k][0]) x_tail_check.append( old_div(infields[index_infield][3], NRM)) y_tail_check.append( old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM)) tail_check_temperatures.append(ptrm_tail[k][0]) break except: pass x_tail_check = np.array(x_tail_check) y_tail_check = np.array(y_tail_check) tail_check_temperatures = np.array(tail_check_temperatures) x_tail_check_starting_point = np.array(x_tail_check_starting_point) y_tail_check_starting_point = np.array(y_tail_check_starting_point) tail_checks_starting_temperatures = np.array( tail_checks_starting_temperatures) # -------------------------------------------------------------- # collect the chosen segment in the Arai plot to arrays x_Arai_segment = x_Arai[start:end + 1] # chosen segent in the Arai plot y_Arai_segment = y_Arai[start:end + 1] # chosen segent in the Arai plot # -------------------------------------------------------------- # collect pTRM checks in segment to arrays # notice, this is different than the conventional DRATS. # for scat calculation we take only the pTRM checks which were carried out # before reaching the highest temperature in the chosen segment x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], [] for k in range(len(ptrm_checks_temperatures)): if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures <= pars[max_key]: x_ptrm_check_for_SCAT.append(x_ptrm_check[k]) y_ptrm_check_for_SCAT.append(y_ptrm_check[k]) x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT) y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT) # -------------------------------------------------------------- # collect Tail checks in segment to arrays # for scat calculation we take only the tail checks which were carried out # before reaching the highest temperature in the chosen segment x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], [] for k in range(len(tail_check_temperatures)): if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]: x_tail_check_for_SCAT.append(x_tail_check[k]) y_tail_check_for_SCAT.append(y_tail_check[k]) x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT) y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT) # -------------------------------------------------------------- # calculate the lines that define the scat box: # if threshold value for beta is not defined, then scat cannot be calculated (pass) # in this case, scat pass if beta_key in list(accept.keys()) and accept[beta_key] != "": b_beta_threshold = float(accept[beta_key]) b = pars[b_key] # best fit line cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass a = cm_y - b * cm_x # lines with slope = slope +/- 2*(specimen_b_beta) two_sigma_beta_threshold = 2 * b_beta_threshold two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b) # a line with a shallower slope (b + 2*beta*b) passing through the center of mass # y=a1+b1x b1 = b + two_sigma_slope_threshold a1 = cm_y - b1 * cm_x # bounding line with steeper slope (b - 2*beta*b) passing through the center of mass # y=a2+b2x b2 = b - 
two_sigma_slope_threshold a2 = cm_y - b2 * cm_x # lower bounding line of the 'beta box' # y=intercept1+slop1x slop1 = old_div(a1, ((old_div(a2, b2)))) intercept1 = a1 # higher bounding line of the 'beta box' # y=intercept2+slop2x slop2 = old_div(a2, ((old_div(a1, b1)))) intercept2 = a2 pars['specimen_scat_bounding_line_high'] = [intercept2, slop2] pars['specimen_scat_bounding_line_low'] = [intercept1, slop1] # -------------------------------------------------------------- # check if the Arai data points are in the 'box' # the two bounding lines ymin = intercept1 + x_Arai_segment * slop1 ymax = intercept2 + x_Arai_segment * slop2 # arrays of "True" or "False" check_1 = y_Arai_segment > ymax check_2 = y_Arai_segment < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_arai_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the pTRM checks data points are in the 'box' if len(x_ptrm_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_ptrm_check_for_SCAT * slop1 ymax = intercept2 + x_ptrm_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_ptrm_check_for_SCAT > ymax check_2 = y_ptrm_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_ptrm_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the tail checks data points are in the 'box' if len(x_tail_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_tail_check_for_SCAT * slop1 ymax = intercept2 + x_tail_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_tail_check_for_SCAT > ymax check_2 = y_tail_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_tail_beta_box_scatter"] = True # -------------------------------------------------------------- # check if specimen_scat is PASS or FAIL: if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]: pars[scat_key] = 'f' else: pars[scat_key] = 't' return pars, 0
python
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs): """ calculate the paleointensity magic parameters make some definitions """ if 'version' in list(kwargs.keys()) and kwargs['version'] == 3: meth_key = 'method_codes' beta_key = 'int_b_beta' temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max' dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi' # convert dataframe to list of dictionaries datablock = datablock.to_dict('records') z_key = 'int_z' drats_key = 'int_drats' drat_key = 'int_drat' md_key = 'int_md' dec_key = 'dir_dec' inc_key = 'dir_inc' mad_key = 'int_mad_free' dang_key = 'int_dang' ptrm_key = 'int_n_ptrm' theta_key = 'int_theta' gamma_key = 'int_gamma' delta_key = 'int_delta' frac_key = 'int_frac' gmax_key = 'int_gmax' scat_key = 'int_scat' else: beta_key = 'specimen_b_beta' meth_key = 'magic_method_codes' temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max' z_key = 'specimen_z' drats_key = 'specimen_drats' drat_key = 'specimen_drat' md_key = 'specimen_md' dec_key = 'specimen_dec' inc_key = 'specimen_inc' mad_key = 'specimen_int_mad' dang_key = 'specimen_dang' ptrm_key = 'specimen_int_ptrm_n' theta_key = 'specimen_theta' gamma_key = 'specimen_gamma' delta_key = 'specimen_delta' frac_key = 'specimen_frac' gmax_key = 'specimen_gmax' scat_key = 'specimen_scat' first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], [] methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", "" zptrm_check = [] first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[ 0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5] if len(araiblock) > 6: # used only for perpendicular method of paleointensity ThetaChecks = araiblock[6] # used only for perpendicular method of paleointensity DeltaChecks = araiblock[7] xi, yi, diffcum = [], [], 0 xiz, xzi, yiz, yzi = [], [], [], [] Nptrm, dmax = 0, -1e-22 # check if even zero and infield steps if len(first_Z) > len(first_I): maxe = len(first_I) - 1 else: maxe = len(first_Z) - 1 if end == 0 or end > maxe: end = maxe # get the MAD, DANG, etc. for directional data bstep = araiblock[0][start][0] estep = araiblock[0][end][0] zstart, zend = 0, len(zijdblock) for k in range(len(zijdblock)): zrec = zijdblock[k] if zrec[0] == bstep: zstart = k if zrec[0] == estep: zend = k PCA = domean(zijdblock, zstart, zend, 'DE-BFL') D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz for rec in zijdblock: D.append((rec[1], rec[2], rec[3])) Du.append((rec[1], rec[2])) if rec[4] == 1: Dzi.append((rec[1], rec[2])) # if this is ZI step else: Diz.append((rec[1], rec[2])) # if this is IZ step # calculate the vector difference sum vds = dovds(D) b_zi, b_iz = [], [] # collect data included in ZigZag calculation if end + 1 >= len(first_Z): stop = end - 1 else: stop = end for k in range(start, end + 1): for l in range(len(first_I)): irec = first_I[l] if irec[0] == first_Z[k][0]: xi.append(irec[3]) yi.append(first_Z[k][3]) pars, errcode = int_pars(xi, yi, vds) if errcode == 1: return pars, errcode # for k in range(start,end+1): for k in range(len(first_Z) - 1): for l in range(k): # only go down to 10% of NRM..... 
if old_div(first_Z[k][3], vds) > 0.1: irec = first_I[l] if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step xzi = irec[3] yzi = first_Z[k][3] xiz = first_I[l + 1][3] yiz = first_Z[k + 1][3] slope = np.arctan2((yzi - yiz), (xiz - xzi)) r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2) if r > .1 * vds: b_zi.append(slope) # suppress noise elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step xiz = irec[3] yiz = first_Z[k][3] xzi = first_I[l + 1][3] yzi = first_Z[k + 1][3] slope = np.arctan2((yiz - yzi), (xzi - xiz)) r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2) if r > .1 * vds: b_iz.append(slope) # suppress noise # ZigZag, Frat, Trat = -1, 0, 0 if len(Diz) > 2 and len(Dzi) > 2: ZigZag = 0 dizp = fisher_mean(Diz) # get Fisher stats on IZ steps dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps dup = fisher_mean(Du) # get Fisher stats on all steps # # if directions are TOO well grouped, can get false positive for ftest, so # angles must be > 3 degrees apart. # if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.: F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \ (dup['n'] - dzip['r'] - dizp['r'] ) # Watson test for common mean nf = 2. * (dup['n'] - 2.) # number of degees of freedom ftest = fcalc(2, nf) Frat = old_div(F, ftest) if Frat > 1.: ZigZag = Frat # fails zigzag on directions methcode = "SM-FTEST" # now do slopes if len(b_zi) > 2 and len(b_iz) > 2: bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev biz_m, biz_sig = gausspars(b_iz) n_zi = float(len(b_zi)) n_iz = float(len(b_iz)) b_diff = abs(bzi_m - biz_m) # difference in means # # avoid false positives - set 3 degree slope difference here too if b_diff > 3 * np.pi / 180.: nf = n_zi + n_iz - 2. # degrees of freedom svar = old_div(((n_zi - 1.) * bzi_sig**2 + (n_iz - 1.) * biz_sig**2), nf) T = old_div((b_diff), np.sqrt( svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t ttest = tcalc(nf, .05) # t-test at 95% conf. 
Trat = old_div(T, ttest) if Trat > 1 and Trat > Frat: ZigZag = Trat # fails zigzag on directions methcode = "SM-TTEST" pars[z_key] = ZigZag pars[meth_key] = methcode # do drats if len(ptrm_check) != 0: diffcum, drat_max = 0, 0 for prec in ptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 2 # don't count alteration that happens after this step if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] if abs(prec[3] - irec[3]) > drat_max: drat_max = abs(prec[3] - irec[3]) pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3]) elif len(zptrm_check) != 0: diffcum = 0 for prec in zptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 1 if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) else: pars[drats_key] = -1 pars[drat_key] = -1 # and the pTRM tails if len(ptrm_tail) != 0: for trec in ptrm_tail: step = trec[0] for irec in first_I: if irec[0] == step: break if abs(trec[3]) > dmax: dmax = abs(trec[3]) pars[md_key] = (100 * dmax / vds) else: pars[md_key] = -1 pars[min_key] = bstep pars[max_key] = estep pars[dec_key] = PCA["specimen_dec"] pars[inc_key] = PCA["specimen_inc"] pars[mad_key] = PCA["specimen_mad"] pars[dang_key] = PCA["specimen_dang"] pars[ptrm_key] = Nptrm # and the ThetaChecks if ThetaChecks != "": t = 0 for theta in ThetaChecks: if theta[0] >= bstep and theta[0] <= estep and theta[1] > t: t = theta[1] pars[theta_key] = t else: pars[theta_key] = -1 # and the DeltaChecks if DeltaChecks != "": d = 0 for delta in DeltaChecks: if delta[0] >= bstep and delta[0] <= estep and delta[1] > d: d = delta[1] pars[delta_key] else: pars[delta_key] = -1 pars[gamma_key] = -1 if GammaChecks != "": for gamma in GammaChecks: if gamma[0] <= estep: pars['specimen_gamma'] = gamma[1] # -------------------------------------------------------------- # From here added By Ron Shaar 11-Dec 2012 # New parameters defined in Shaar and Tauxe (2012): # FRAC (specimen_frac) - ranges from 0. to 1. # SCAT (specimen_scat) - takes 1/0 # gap_max (specimen_gmax) - ranges from 0. to 1. # -------------------------------------------------------------- # -------------------------------------------------------------- # FRAC is similar to Fvds, but the numerator is the vds fraction: # FRAC= [ vds (start,end)] / total vds ] # gap_max= max [ (vector difference) / vds (start,end)] # -------------------------------------------------------------- # collect all zijderveld data to arrays and calculate VDS z_temperatures = [row[0] for row in zijdblock] zdata = [] # array of zero-fields measurements in Cartezian coordinates # array of vector differences (for vds calculation) vector_diffs = [] NRM = zijdblock[0][3] # NRM for k in range(len(zijdblock)): DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)] cart = dir2cart(DIR) zdata.append(np.array([cart[0], cart[1], cart[2]])) if k > 0: vector_diffs.append( np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2))) # last vector difference: from the last point to the origin. 
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2))) vds = sum(vector_diffs) # vds calculation zdata = np.array(zdata) vector_diffs = np.array(vector_diffs) # calculate the vds within the chosen segment vector_diffs_segment = vector_diffs[zstart:zend] # FRAC calculation FRAC = old_div(sum(vector_diffs_segment), vds) pars[frac_key] = FRAC # gap_max calculation max_FRAC_gap = max( old_div(vector_diffs_segment, sum(vector_diffs_segment))) pars[gmax_key] = max_FRAC_gap # --------------------------------------------------------------------- # Calculate the "scat box" # all data-points, pTRM checks, and tail-checks, should be inside a "scat box" # --------------------------------------------------------------------- # intialization # fail scat due to arai plot data points pars["fail_arai_beta_box_scatter"] = False pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks pars[scat_key] = "t" # Pass by default # -------------------------------------------------------------- # collect all Arai plot data points in arrays x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], [] NRMs = araiblock[0] PTRMs = araiblock[1] ptrm_checks = araiblock[2] ptrm_tail = araiblock[3] PTRMs_temperatures = [row[0] for row in PTRMs] NRMs_temperatures = [row[0] for row in NRMs] NRM = NRMs[0][3] for k in range(len(NRMs)): index_pTRMs = PTRMs_temperatures.index(NRMs[k][0]) x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM)) y_Arai.append(old_div(NRMs[k][3], NRM)) t_Arai.append(NRMs[k][0]) if NRMs[k][4] == 1: steps_Arai.append('ZI') else: steps_Arai.append('IZ') x_Arai = np.array(x_Arai) y_Arai = np.array(y_Arai) # -------------------------------------------------------------- # collect all pTRM check to arrays x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], [] x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_checks)): if ptrm_checks[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_ptrm_check_starting_point.append(x_Arai[index]) y_ptrm_check_starting_point.append(y_Arai[index]) ptrm_checks_starting_temperatures.append( starting_temperature) index_zerofield = zerofield_temperatures.index( ptrm_checks[k][0]) x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM)) y_ptrm_check.append( old_div(zerofields[index_zerofield][3], NRM)) ptrm_checks_temperatures.append(ptrm_checks[k][0]) break except: pass x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point) y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point) ptrm_checks_starting_temperatures = np.array( ptrm_checks_starting_temperatures) x_ptrm_check = np.array(x_ptrm_check) y_ptrm_check = np.array(y_ptrm_check) ptrm_checks_temperatures = np.array(ptrm_checks_temperatures) # -------------------------------------------------------------- # collect tail checks to arrays x_tail_check, y_tail_check, tail_check_temperatures = [], [], [] x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_tail)): if ptrm_tail[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if 
"LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_tail_check_starting_point.append(x_Arai[index]) y_tail_check_starting_point.append(y_Arai[index]) tail_checks_starting_temperatures.append( starting_temperature) index_infield = infield_temperatures.index( ptrm_tail[k][0]) x_tail_check.append( old_div(infields[index_infield][3], NRM)) y_tail_check.append( old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM)) tail_check_temperatures.append(ptrm_tail[k][0]) break except: pass x_tail_check = np.array(x_tail_check) y_tail_check = np.array(y_tail_check) tail_check_temperatures = np.array(tail_check_temperatures) x_tail_check_starting_point = np.array(x_tail_check_starting_point) y_tail_check_starting_point = np.array(y_tail_check_starting_point) tail_checks_starting_temperatures = np.array( tail_checks_starting_temperatures) # -------------------------------------------------------------- # collect the chosen segment in the Arai plot to arrays x_Arai_segment = x_Arai[start:end + 1] # chosen segent in the Arai plot y_Arai_segment = y_Arai[start:end + 1] # chosen segent in the Arai plot # -------------------------------------------------------------- # collect pTRM checks in segment to arrays # notice, this is different than the conventional DRATS. # for scat calculation we take only the pTRM checks which were carried out # before reaching the highest temperature in the chosen segment x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], [] for k in range(len(ptrm_checks_temperatures)): if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures <= pars[max_key]: x_ptrm_check_for_SCAT.append(x_ptrm_check[k]) y_ptrm_check_for_SCAT.append(y_ptrm_check[k]) x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT) y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT) # -------------------------------------------------------------- # collect Tail checks in segment to arrays # for scat calculation we take only the tail checks which were carried out # before reaching the highest temperature in the chosen segment x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], [] for k in range(len(tail_check_temperatures)): if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]: x_tail_check_for_SCAT.append(x_tail_check[k]) y_tail_check_for_SCAT.append(y_tail_check[k]) x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT) y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT) # -------------------------------------------------------------- # calculate the lines that define the scat box: # if threshold value for beta is not defined, then scat cannot be calculated (pass) # in this case, scat pass if beta_key in list(accept.keys()) and accept[beta_key] != "": b_beta_threshold = float(accept[beta_key]) b = pars[b_key] # best fit line cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass a = cm_y - b * cm_x # lines with slope = slope +/- 2*(specimen_b_beta) two_sigma_beta_threshold = 2 * b_beta_threshold two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b) # a line with a shallower slope (b + 2*beta*b) passing through the center of mass # y=a1+b1x b1 = b + two_sigma_slope_threshold a1 = cm_y - b1 * cm_x # bounding line with steeper slope (b - 2*beta*b) passing through the center of mass # y=a2+b2x b2 = b - 
two_sigma_slope_threshold a2 = cm_y - b2 * cm_x # lower bounding line of the 'beta box' # y=intercept1+slop1x slop1 = old_div(a1, ((old_div(a2, b2)))) intercept1 = a1 # higher bounding line of the 'beta box' # y=intercept2+slop2x slop2 = old_div(a2, ((old_div(a1, b1)))) intercept2 = a2 pars['specimen_scat_bounding_line_high'] = [intercept2, slop2] pars['specimen_scat_bounding_line_low'] = [intercept1, slop1] # -------------------------------------------------------------- # check if the Arai data points are in the 'box' # the two bounding lines ymin = intercept1 + x_Arai_segment * slop1 ymax = intercept2 + x_Arai_segment * slop2 # arrays of "True" or "False" check_1 = y_Arai_segment > ymax check_2 = y_Arai_segment < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_arai_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the pTRM checks data points are in the 'box' if len(x_ptrm_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_ptrm_check_for_SCAT * slop1 ymax = intercept2 + x_ptrm_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_ptrm_check_for_SCAT > ymax check_2 = y_ptrm_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_ptrm_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the tail checks data points are in the 'box' if len(x_tail_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_tail_check_for_SCAT * slop1 ymax = intercept2 + x_tail_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_tail_check_for_SCAT > ymax check_2 = y_tail_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_tail_beta_box_scatter"] = True # -------------------------------------------------------------- # check if specimen_scat is PASS or FAIL: if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]: pars[scat_key] = 'f' else: pars[scat_key] = 't' return pars, 0
['def', 'PintPars', '(', 'datablock', ',', 'araiblock', ',', 'zijdblock', ',', 'start', ',', 'end', ',', 'accept', ',', '*', '*', 'kwargs', ')', ':', 'if', "'version'", 'in', 'list', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'and', 'kwargs', '[', "'version'", ']', '==', '3', ':', 'meth_key', '=', "'method_codes'", 'beta_key', '=', "'int_b_beta'", 'temp_key', ',', 'min_key', ',', 'max_key', '=', "'treat_temp'", ',', "'meas_step_min'", ',', "'meas_step_max'", 'dc_theta_key', ',', 'dc_phi_key', '=', "'treat_dc_field_theta'", ',', "'treat_dc_field_phi'", '# convert dataframe to list of dictionaries', 'datablock', '=', 'datablock', '.', 'to_dict', '(', "'records'", ')', 'z_key', '=', "'int_z'", 'drats_key', '=', "'int_drats'", 'drat_key', '=', "'int_drat'", 'md_key', '=', "'int_md'", 'dec_key', '=', "'dir_dec'", 'inc_key', '=', "'dir_inc'", 'mad_key', '=', "'int_mad_free'", 'dang_key', '=', "'int_dang'", 'ptrm_key', '=', "'int_n_ptrm'", 'theta_key', '=', "'int_theta'", 'gamma_key', '=', "'int_gamma'", 'delta_key', '=', "'int_delta'", 'frac_key', '=', "'int_frac'", 'gmax_key', '=', "'int_gmax'", 'scat_key', '=', "'int_scat'", 'else', ':', 'beta_key', '=', "'specimen_b_beta'", 'meth_key', '=', "'magic_method_codes'", 'temp_key', ',', 'min_key', ',', 'max_key', '=', "'treatment_temp'", ',', "'measurement_step_min'", ',', "'measurement_step_max'", 'z_key', '=', "'specimen_z'", 'drats_key', '=', "'specimen_drats'", 'drat_key', '=', "'specimen_drat'", 'md_key', '=', "'specimen_md'", 'dec_key', '=', "'specimen_dec'", 'inc_key', '=', "'specimen_inc'", 'mad_key', '=', "'specimen_int_mad'", 'dang_key', '=', "'specimen_dang'", 'ptrm_key', '=', "'specimen_int_ptrm_n'", 'theta_key', '=', "'specimen_theta'", 'gamma_key', '=', "'specimen_gamma'", 'delta_key', '=', "'specimen_delta'", 'frac_key', '=', "'specimen_frac'", 'gmax_key', '=', "'specimen_gmax'", 'scat_key', '=', "'specimen_scat'", 'first_Z', ',', 'first_I', ',', 'zptrm_check', ',', 'ptrm_check', ',', 'ptrm_tail', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'methcode', ',', 'ThetaChecks', ',', 'DeltaChecks', ',', 'GammaChecks', '=', '""', ',', '""', ',', '""', ',', '""', 'zptrm_check', '=', '[', ']', 'first_Z', ',', 'first_I', ',', 'ptrm_check', ',', 'ptrm_tail', ',', 'zptrm_check', ',', 'GammaChecks', '=', 'araiblock', '[', '0', ']', ',', 'araiblock', '[', '1', ']', ',', 'araiblock', '[', '2', ']', ',', 'araiblock', '[', '3', ']', ',', 'araiblock', '[', '4', ']', ',', 'araiblock', '[', '5', ']', 'if', 'len', '(', 'araiblock', ')', '>', '6', ':', '# used only for perpendicular method of paleointensity', 'ThetaChecks', '=', 'araiblock', '[', '6', ']', '# used only for perpendicular method of paleointensity', 'DeltaChecks', '=', 'araiblock', '[', '7', ']', 'xi', ',', 'yi', ',', 'diffcum', '=', '[', ']', ',', '[', ']', ',', '0', 'xiz', ',', 'xzi', ',', 'yiz', ',', 'yzi', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'Nptrm', ',', 'dmax', '=', '0', ',', '-', '1e-22', '# check if even zero and infield steps', 'if', 'len', '(', 'first_Z', ')', '>', 'len', '(', 'first_I', ')', ':', 'maxe', '=', 'len', '(', 'first_I', ')', '-', '1', 'else', ':', 'maxe', '=', 'len', '(', 'first_Z', ')', '-', '1', 'if', 'end', '==', '0', 'or', 'end', '>', 'maxe', ':', 'end', '=', 'maxe', '# get the MAD, DANG, etc. 
for directional data', 'bstep', '=', 'araiblock', '[', '0', ']', '[', 'start', ']', '[', '0', ']', 'estep', '=', 'araiblock', '[', '0', ']', '[', 'end', ']', '[', '0', ']', 'zstart', ',', 'zend', '=', '0', ',', 'len', '(', 'zijdblock', ')', 'for', 'k', 'in', 'range', '(', 'len', '(', 'zijdblock', ')', ')', ':', 'zrec', '=', 'zijdblock', '[', 'k', ']', 'if', 'zrec', '[', '0', ']', '==', 'bstep', ':', 'zstart', '=', 'k', 'if', 'zrec', '[', '0', ']', '==', 'estep', ':', 'zend', '=', 'k', 'PCA', '=', 'domean', '(', 'zijdblock', ',', 'zstart', ',', 'zend', ',', "'DE-BFL'", ')', 'D', ',', 'Diz', ',', 'Dzi', ',', 'Du', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', '# list of NRM vectors, and separated by zi and iz', 'for', 'rec', 'in', 'zijdblock', ':', 'D', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ',', 'rec', '[', '3', ']', ')', ')', 'Du', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', 'if', 'rec', '[', '4', ']', '==', '1', ':', 'Dzi', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', '# if this is ZI step', 'else', ':', 'Diz', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', '# if this is IZ step', '# calculate the vector difference sum', 'vds', '=', 'dovds', '(', 'D', ')', 'b_zi', ',', 'b_iz', '=', '[', ']', ',', '[', ']', '# collect data included in ZigZag calculation', 'if', 'end', '+', '1', '>=', 'len', '(', 'first_Z', ')', ':', 'stop', '=', 'end', '-', '1', 'else', ':', 'stop', '=', 'end', 'for', 'k', 'in', 'range', '(', 'start', ',', 'end', '+', '1', ')', ':', 'for', 'l', 'in', 'range', '(', 'len', '(', 'first_I', ')', ')', ':', 'irec', '=', 'first_I', '[', 'l', ']', 'if', 'irec', '[', '0', ']', '==', 'first_Z', '[', 'k', ']', '[', '0', ']', ':', 'xi', '.', 'append', '(', 'irec', '[', '3', ']', ')', 'yi', '.', 'append', '(', 'first_Z', '[', 'k', ']', '[', '3', ']', ')', 'pars', ',', 'errcode', '=', 'int_pars', '(', 'xi', ',', 'yi', ',', 'vds', ')', 'if', 'errcode', '==', '1', ':', 'return', 'pars', ',', 'errcode', '# for k in range(start,end+1):', 'for', 'k', 'in', 'range', '(', 'len', '(', 'first_Z', ')', '-', '1', ')', ':', 'for', 'l', 'in', 'range', '(', 'k', ')', ':', '# only go down to 10% of NRM.....', 'if', 'old_div', '(', 'first_Z', '[', 'k', ']', '[', '3', ']', ',', 'vds', ')', '>', '0.1', ':', 'irec', '=', 'first_I', '[', 'l', ']', 'if', 'irec', '[', '4', ']', '==', '1', 'and', 'first_I', '[', 'l', '+', '1', ']', '[', '4', ']', '==', '0', ':', '# a ZI step', 'xzi', '=', 'irec', '[', '3', ']', 'yzi', '=', 'first_Z', '[', 'k', ']', '[', '3', ']', 'xiz', '=', 'first_I', '[', 'l', '+', '1', ']', '[', '3', ']', 'yiz', '=', 'first_Z', '[', 'k', '+', '1', ']', '[', '3', ']', 'slope', '=', 'np', '.', 'arctan2', '(', '(', 'yzi', '-', 'yiz', ')', ',', '(', 'xiz', '-', 'xzi', ')', ')', 'r', '=', 'np', '.', 'sqrt', '(', '(', 'yzi', '-', 'yiz', ')', '**', '2', '+', '(', 'xiz', '-', 'xzi', ')', '**', '2', ')', 'if', 'r', '>', '.1', '*', 'vds', ':', 'b_zi', '.', 'append', '(', 'slope', ')', '# suppress noise', 'elif', 'irec', '[', '4', ']', '==', '0', 'and', 'first_I', '[', 'l', '+', '1', ']', '[', '4', ']', '==', '1', ':', '# an IZ step', 'xiz', '=', 'irec', '[', '3', ']', 'yiz', '=', 'first_Z', '[', 'k', ']', '[', '3', ']', 'xzi', '=', 'first_I', '[', 'l', '+', '1', ']', '[', '3', ']', 'yzi', '=', 'first_Z', '[', 'k', '+', '1', ']', '[', '3', ']', 'slope', '=', 'np', '.', 'arctan2', '(', '(', 'yiz', '-', 'yzi', ')', ',', '(', 'xzi', '-', 
'xiz', ')', ')', 'r', '=', 'np', '.', 'sqrt', '(', '(', 'yiz', '-', 'yzi', ')', '**', '2', '+', '(', 'xzi', '-', 'xiz', ')', '**', '2', ')', 'if', 'r', '>', '.1', '*', 'vds', ':', 'b_iz', '.', 'append', '(', 'slope', ')', '# suppress noise', '#', 'ZigZag', ',', 'Frat', ',', 'Trat', '=', '-', '1', ',', '0', ',', '0', 'if', 'len', '(', 'Diz', ')', '>', '2', 'and', 'len', '(', 'Dzi', ')', '>', '2', ':', 'ZigZag', '=', '0', 'dizp', '=', 'fisher_mean', '(', 'Diz', ')', '# get Fisher stats on IZ steps', 'dzip', '=', 'fisher_mean', '(', 'Dzi', ')', '# get Fisher stats on ZI steps', 'dup', '=', 'fisher_mean', '(', 'Du', ')', '# get Fisher stats on all steps', '#', '# if directions are TOO well grouped, can get false positive for ftest, so', '# angles must be > 3 degrees apart.', '#', 'if', 'angle', '(', '[', 'dizp', '[', "'dec'", ']', ',', 'dizp', '[', "'inc'", ']', ']', ',', '[', 'dzip', '[', "'dec'", ']', ',', 'dzip', '[', "'inc'", ']', ']', ')', '>', '3.', ':', 'F', '=', '(', 'dup', '[', "'n'", ']', '-', '2.', ')', '*', '(', 'dzip', '[', "'r'", ']', '+', 'dizp', '[', "'r'", ']', '-', 'dup', '[', "'r'", ']', ')', '/', '(', 'dup', '[', "'n'", ']', '-', 'dzip', '[', "'r'", ']', '-', 'dizp', '[', "'r'", ']', ')', '# Watson test for common mean', 'nf', '=', '2.', '*', '(', 'dup', '[', "'n'", ']', '-', '2.', ')', '# number of degees of freedom', 'ftest', '=', 'fcalc', '(', '2', ',', 'nf', ')', 'Frat', '=', 'old_div', '(', 'F', ',', 'ftest', ')', 'if', 'Frat', '>', '1.', ':', 'ZigZag', '=', 'Frat', '# fails zigzag on directions', 'methcode', '=', '"SM-FTEST"', '# now do slopes', 'if', 'len', '(', 'b_zi', ')', '>', '2', 'and', 'len', '(', 'b_iz', ')', '>', '2', ':', 'bzi_m', ',', 'bzi_sig', '=', 'gausspars', '(', 'b_zi', ')', '# mean, std dev', 'biz_m', ',', 'biz_sig', '=', 'gausspars', '(', 'b_iz', ')', 'n_zi', '=', 'float', '(', 'len', '(', 'b_zi', ')', ')', 'n_iz', '=', 'float', '(', 'len', '(', 'b_iz', ')', ')', 'b_diff', '=', 'abs', '(', 'bzi_m', '-', 'biz_m', ')', '# difference in means', '#', '# avoid false positives - set 3 degree slope difference here too', 'if', 'b_diff', '>', '3', '*', 'np', '.', 'pi', '/', '180.', ':', 'nf', '=', 'n_zi', '+', 'n_iz', '-', '2.', '# degrees of freedom', 'svar', '=', 'old_div', '(', '(', '(', 'n_zi', '-', '1.', ')', '*', 'bzi_sig', '**', '2', '+', '(', 'n_iz', '-', '1.', ')', '*', 'biz_sig', '**', '2', ')', ',', 'nf', ')', 'T', '=', 'old_div', '(', '(', 'b_diff', ')', ',', 'np', '.', 'sqrt', '(', 'svar', '*', '(', 'old_div', '(', '1.0', ',', 'n_zi', ')', '+', 'old_div', '(', '1.0', ',', 'n_iz', ')', ')', ')', ')', "# student's t", 'ttest', '=', 'tcalc', '(', 'nf', ',', '.05', ')', '# t-test at 95% conf.', 'Trat', '=', 'old_div', '(', 'T', ',', 'ttest', ')', 'if', 'Trat', '>', '1', 'and', 'Trat', '>', 'Frat', ':', 'ZigZag', '=', 'Trat', '# fails zigzag on directions', 'methcode', '=', '"SM-TTEST"', 'pars', '[', 'z_key', ']', '=', 'ZigZag', 'pars', '[', 'meth_key', ']', '=', 'methcode', '# do drats', 'if', 'len', '(', 'ptrm_check', ')', '!=', '0', ':', 'diffcum', ',', 'drat_max', '=', '0', ',', '0', 'for', 'prec', 'in', 'ptrm_check', ':', 'step', '=', 'prec', '[', '0', ']', 'endbak', '=', 'end', 'zend', '=', 'end', 'while', 'zend', '>', 'len', '(', 'zijdblock', ')', '-', '1', ':', 'zend', '=', 'zend', '-', '2', "# don't count alteration that happens after this step", 'if', 'step', '<', 'zijdblock', '[', 'zend', ']', '[', '0', ']', ':', 'Nptrm', '+=', '1', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'diffcum', 
'+=', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', 'if', 'abs', '(', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', ')', '>', 'drat_max', ':', 'drat_max', '=', 'abs', '(', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', ')', 'pars', '[', 'drats_key', ']', '=', '(', '100', '*', 'abs', '(', 'diffcum', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'pars', '[', 'drat_key', ']', '=', '(', '100', '*', 'abs', '(', 'drat_max', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'elif', 'len', '(', 'zptrm_check', ')', '!=', '0', ':', 'diffcum', '=', '0', 'for', 'prec', 'in', 'zptrm_check', ':', 'step', '=', 'prec', '[', '0', ']', 'endbak', '=', 'end', 'zend', '=', 'end', 'while', 'zend', '>', 'len', '(', 'zijdblock', ')', '-', '1', ':', 'zend', '=', 'zend', '-', '1', 'if', 'step', '<', 'zijdblock', '[', 'zend', ']', '[', '0', ']', ':', 'Nptrm', '+=', '1', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'diffcum', '+=', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', 'pars', '[', 'drats_key', ']', '=', '(', '100', '*', 'abs', '(', 'diffcum', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'else', ':', 'pars', '[', 'drats_key', ']', '=', '-', '1', 'pars', '[', 'drat_key', ']', '=', '-', '1', '# and the pTRM tails', 'if', 'len', '(', 'ptrm_tail', ')', '!=', '0', ':', 'for', 'trec', 'in', 'ptrm_tail', ':', 'step', '=', 'trec', '[', '0', ']', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'if', 'abs', '(', 'trec', '[', '3', ']', ')', '>', 'dmax', ':', 'dmax', '=', 'abs', '(', 'trec', '[', '3', ']', ')', 'pars', '[', 'md_key', ']', '=', '(', '100', '*', 'dmax', '/', 'vds', ')', 'else', ':', 'pars', '[', 'md_key', ']', '=', '-', '1', 'pars', '[', 'min_key', ']', '=', 'bstep', 'pars', '[', 'max_key', ']', '=', 'estep', 'pars', '[', 'dec_key', ']', '=', 'PCA', '[', '"specimen_dec"', ']', 'pars', '[', 'inc_key', ']', '=', 'PCA', '[', '"specimen_inc"', ']', 'pars', '[', 'mad_key', ']', '=', 'PCA', '[', '"specimen_mad"', ']', 'pars', '[', 'dang_key', ']', '=', 'PCA', '[', '"specimen_dang"', ']', 'pars', '[', 'ptrm_key', ']', '=', 'Nptrm', '# and the ThetaChecks', 'if', 'ThetaChecks', '!=', '""', ':', 't', '=', '0', 'for', 'theta', 'in', 'ThetaChecks', ':', 'if', 'theta', '[', '0', ']', '>=', 'bstep', 'and', 'theta', '[', '0', ']', '<=', 'estep', 'and', 'theta', '[', '1', ']', '>', 't', ':', 't', '=', 'theta', '[', '1', ']', 'pars', '[', 'theta_key', ']', '=', 't', 'else', ':', 'pars', '[', 'theta_key', ']', '=', '-', '1', '# and the DeltaChecks', 'if', 'DeltaChecks', '!=', '""', ':', 'd', '=', '0', 'for', 'delta', 'in', 'DeltaChecks', ':', 'if', 'delta', '[', '0', ']', '>=', 'bstep', 'and', 'delta', '[', '0', ']', '<=', 'estep', 'and', 'delta', '[', '1', ']', '>', 'd', ':', 'd', '=', 'delta', '[', '1', ']', 'pars', '[', 'delta_key', ']', 'else', ':', 'pars', '[', 'delta_key', ']', '=', '-', '1', 'pars', '[', 'gamma_key', ']', '=', '-', '1', 'if', 'GammaChecks', '!=', '""', ':', 'for', 'gamma', 'in', 'GammaChecks', ':', 'if', 'gamma', '[', '0', ']', '<=', 'estep', ':', 'pars', '[', "'specimen_gamma'", ']', '=', 'gamma', '[', '1', ']', '# --------------------------------------------------------------', '# From here added By Ron Shaar 11-Dec 2012', '# New parameters defined in Shaar and Tauxe (2012):', '# FRAC (specimen_frac) - ranges from 0. to 1.', '# SCAT (specimen_scat) - takes 1/0', '# gap_max (specimen_gmax) - ranges from 0. 
to 1.', '# --------------------------------------------------------------', '# --------------------------------------------------------------', '# FRAC is similar to Fvds, but the numerator is the vds fraction:', '# FRAC= [ vds (start,end)] / total vds ]', '# gap_max= max [ (vector difference) / vds (start,end)]', '# --------------------------------------------------------------', '# collect all zijderveld data to arrays and calculate VDS', 'z_temperatures', '=', '[', 'row', '[', '0', ']', 'for', 'row', 'in', 'zijdblock', ']', 'zdata', '=', '[', ']', '# array of zero-fields measurements in Cartezian coordinates', '# array of vector differences (for vds calculation)', 'vector_diffs', '=', '[', ']', 'NRM', '=', 'zijdblock', '[', '0', ']', '[', '3', ']', '# NRM', 'for', 'k', 'in', 'range', '(', 'len', '(', 'zijdblock', ')', ')', ':', 'DIR', '=', '[', 'zijdblock', '[', 'k', ']', '[', '1', ']', ',', 'zijdblock', '[', 'k', ']', '[', '2', ']', ',', 'old_div', '(', 'zijdblock', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ']', 'cart', '=', 'dir2cart', '(', 'DIR', ')', 'zdata', '.', 'append', '(', 'np', '.', 'array', '(', '[', 'cart', '[', '0', ']', ',', 'cart', '[', '1', ']', ',', 'cart', '[', '2', ']', ']', ')', ')', 'if', 'k', '>', '0', ':', 'vector_diffs', '.', 'append', '(', 'np', '.', 'sqrt', '(', 'sum', '(', '(', 'np', '.', 'array', '(', 'zdata', '[', '-', '2', ']', ')', '-', 'np', '.', 'array', '(', 'zdata', '[', '-', '1', ']', ')', ')', '**', '2', ')', ')', ')', '# last vector difference: from the last point to the origin.', 'vector_diffs', '.', 'append', '(', 'np', '.', 'sqrt', '(', 'sum', '(', 'np', '.', 'array', '(', 'zdata', '[', '-', '1', ']', ')', '**', '2', ')', ')', ')', 'vds', '=', 'sum', '(', 'vector_diffs', ')', '# vds calculation', 'zdata', '=', 'np', '.', 'array', '(', 'zdata', ')', 'vector_diffs', '=', 'np', '.', 'array', '(', 'vector_diffs', ')', '# calculate the vds within the chosen segment', 'vector_diffs_segment', '=', 'vector_diffs', '[', 'zstart', ':', 'zend', ']', '# FRAC calculation', 'FRAC', '=', 'old_div', '(', 'sum', '(', 'vector_diffs_segment', ')', ',', 'vds', ')', 'pars', '[', 'frac_key', ']', '=', 'FRAC', '# gap_max calculation', 'max_FRAC_gap', '=', 'max', '(', 'old_div', '(', 'vector_diffs_segment', ',', 'sum', '(', 'vector_diffs_segment', ')', ')', ')', 'pars', '[', 'gmax_key', ']', '=', 'max_FRAC_gap', '# ---------------------------------------------------------------------', '# Calculate the "scat box"', '# all data-points, pTRM checks, and tail-checks, should be inside a "scat box"', '# ---------------------------------------------------------------------', '# intialization', '# fail scat due to arai plot data points', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', '=', 'False', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', '=', 'False', '# fail scat due to pTRM checks', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', '=', 'False', '# fail scat due to tail checks', 'pars', '[', 'scat_key', ']', '=', '"t"', '# Pass by default', '# --------------------------------------------------------------', '# collect all Arai plot data points in arrays', 'x_Arai', ',', 'y_Arai', ',', 't_Arai', ',', 'steps_Arai', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'NRMs', '=', 'araiblock', '[', '0', ']', 'PTRMs', '=', 'araiblock', '[', '1', ']', 'ptrm_checks', '=', 'araiblock', '[', '2', ']', 'ptrm_tail', '=', 'araiblock', '[', '3', ']', 'PTRMs_temperatures', '=', '[', 'row', '[', '0', ']', 'for', 'row', 'in', 'PTRMs', ']', 'NRMs_temperatures', '=', 
'[', 'row', '[', '0', ']', 'for', 'row', 'in', 'NRMs', ']', 'NRM', '=', 'NRMs', '[', '0', ']', '[', '3', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'NRMs', ')', ')', ':', 'index_pTRMs', '=', 'PTRMs_temperatures', '.', 'index', '(', 'NRMs', '[', 'k', ']', '[', '0', ']', ')', 'x_Arai', '.', 'append', '(', 'old_div', '(', 'PTRMs', '[', 'index_pTRMs', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_Arai', '.', 'append', '(', 'old_div', '(', 'NRMs', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ')', 't_Arai', '.', 'append', '(', 'NRMs', '[', 'k', ']', '[', '0', ']', ')', 'if', 'NRMs', '[', 'k', ']', '[', '4', ']', '==', '1', ':', 'steps_Arai', '.', 'append', '(', "'ZI'", ')', 'else', ':', 'steps_Arai', '.', 'append', '(', "'IZ'", ')', 'x_Arai', '=', 'np', '.', 'array', '(', 'x_Arai', ')', 'y_Arai', '=', 'np', '.', 'array', '(', 'y_Arai', ')', '# --------------------------------------------------------------', '# collect all pTRM check to arrays', 'x_ptrm_check', ',', 'y_ptrm_check', ',', 'ptrm_checks_temperatures', ',', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'x_ptrm_check_starting_point', ',', 'y_ptrm_check_starting_point', ',', 'ptrm_checks_starting_temperatures', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_checks', ')', ')', ':', 'if', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', 'in', 'NRMs_temperatures', ':', '# find the starting point of the pTRM check:', 'for', 'i', 'in', 'range', '(', 'len', '(', 'datablock', ')', ')', ':', 'rec', '=', 'datablock', '[', 'i', ']', 'if', '"LT-PTRM-I"', 'in', 'rec', '[', 'meth_key', ']', 'and', 'float', '(', 'rec', '[', 'temp_key', ']', ')', '==', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ':', 'starting_temperature', '=', '(', 'float', '(', 'datablock', '[', 'i', '-', '1', ']', '[', 'temp_key', ']', ')', ')', 'try', ':', 'index', '=', 't_Arai', '.', 'index', '(', 'starting_temperature', ')', 'x_ptrm_check_starting_point', '.', 'append', '(', 'x_Arai', '[', 'index', ']', ')', 'y_ptrm_check_starting_point', '.', 'append', '(', 'y_Arai', '[', 'index', ']', ')', 'ptrm_checks_starting_temperatures', '.', 'append', '(', 'starting_temperature', ')', 'index_zerofield', '=', 'zerofield_temperatures', '.', 'index', '(', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ')', 'x_ptrm_check', '.', 'append', '(', 'old_div', '(', 'ptrm_checks', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_ptrm_check', '.', 'append', '(', 'old_div', '(', 'zerofields', '[', 'index_zerofield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'ptrm_checks_temperatures', '.', 'append', '(', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ')', 'break', 'except', ':', 'pass', 'x_ptrm_check_starting_point', '=', 'np', '.', 'array', '(', 'x_ptrm_check_starting_point', ')', 'y_ptrm_check_starting_point', '=', 'np', '.', 'array', '(', 'y_ptrm_check_starting_point', ')', 'ptrm_checks_starting_temperatures', '=', 'np', '.', 'array', '(', 'ptrm_checks_starting_temperatures', ')', 'x_ptrm_check', '=', 'np', '.', 'array', '(', 'x_ptrm_check', ')', 'y_ptrm_check', '=', 'np', '.', 'array', '(', 'y_ptrm_check', ')', 'ptrm_checks_temperatures', '=', 'np', '.', 'array', '(', 'ptrm_checks_temperatures', ')', '# --------------------------------------------------------------', '# collect tail checks to arrays', 'x_tail_check', ',', 'y_tail_check', ',', 'tail_check_temperatures', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'x_tail_check_starting_point', ',', 'y_tail_check_starting_point', ',', 'tail_checks_starting_temperatures', '=', '[', ']', ',', '[', 
']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_tail', ')', ')', ':', 'if', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', 'in', 'NRMs_temperatures', ':', '# find the starting point of the pTRM check:', 'for', 'i', 'in', 'range', '(', 'len', '(', 'datablock', ')', ')', ':', 'rec', '=', 'datablock', '[', 'i', ']', 'if', '"LT-PTRM-MD"', 'in', 'rec', '[', 'meth_key', ']', 'and', 'float', '(', 'rec', '[', 'temp_key', ']', ')', '==', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ':', 'starting_temperature', '=', '(', 'float', '(', 'datablock', '[', 'i', '-', '1', ']', '[', 'temp_key', ']', ')', ')', 'try', ':', 'index', '=', 't_Arai', '.', 'index', '(', 'starting_temperature', ')', 'x_tail_check_starting_point', '.', 'append', '(', 'x_Arai', '[', 'index', ']', ')', 'y_tail_check_starting_point', '.', 'append', '(', 'y_Arai', '[', 'index', ']', ')', 'tail_checks_starting_temperatures', '.', 'append', '(', 'starting_temperature', ')', 'index_infield', '=', 'infield_temperatures', '.', 'index', '(', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ')', 'x_tail_check', '.', 'append', '(', 'old_div', '(', 'infields', '[', 'index_infield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_tail_check', '.', 'append', '(', 'old_div', '(', 'ptrm_tail', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', '+', 'old_div', '(', 'zerofields', '[', 'index_infield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'tail_check_temperatures', '.', 'append', '(', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ')', 'break', 'except', ':', 'pass', 'x_tail_check', '=', 'np', '.', 'array', '(', 'x_tail_check', ')', 'y_tail_check', '=', 'np', '.', 'array', '(', 'y_tail_check', ')', 'tail_check_temperatures', '=', 'np', '.', 'array', '(', 'tail_check_temperatures', ')', 'x_tail_check_starting_point', '=', 'np', '.', 'array', '(', 'x_tail_check_starting_point', ')', 'y_tail_check_starting_point', '=', 'np', '.', 'array', '(', 'y_tail_check_starting_point', ')', 'tail_checks_starting_temperatures', '=', 'np', '.', 'array', '(', 'tail_checks_starting_temperatures', ')', '# --------------------------------------------------------------', '# collect the chosen segment in the Arai plot to arrays', 'x_Arai_segment', '=', 'x_Arai', '[', 'start', ':', 'end', '+', '1', ']', '# chosen segent in the Arai plot', 'y_Arai_segment', '=', 'y_Arai', '[', 'start', ':', 'end', '+', '1', ']', '# chosen segent in the Arai plot', '# --------------------------------------------------------------', '# collect pTRM checks in segment to arrays', '# notice, this is different than the conventional DRATS.', '# for scat calculation we take only the pTRM checks which were carried out', '# before reaching the highest temperature in the chosen segment', 'x_ptrm_check_for_SCAT', ',', 'y_ptrm_check_for_SCAT', '=', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_checks_temperatures', ')', ')', ':', 'if', 'ptrm_checks_temperatures', '[', 'k', ']', '>=', 'pars', '[', 'min_key', ']', 'and', 'ptrm_checks_starting_temperatures', '<=', 'pars', '[', 'max_key', ']', ':', 'x_ptrm_check_for_SCAT', '.', 'append', '(', 'x_ptrm_check', '[', 'k', ']', ')', 'y_ptrm_check_for_SCAT', '.', 'append', '(', 'y_ptrm_check', '[', 'k', ']', ')', 'x_ptrm_check_for_SCAT', '=', 'np', '.', 'array', '(', 'x_ptrm_check_for_SCAT', ')', 'y_ptrm_check_for_SCAT', '=', 'np', '.', 'array', '(', 'y_ptrm_check_for_SCAT', ')', '# --------------------------------------------------------------', '# collect Tail checks in segment to arrays', '# for scat calculation we take only the 
tail checks which were carried out', '# before reaching the highest temperature in the chosen segment', 'x_tail_check_for_SCAT', ',', 'y_tail_check_for_SCAT', '=', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'tail_check_temperatures', ')', ')', ':', 'if', 'tail_check_temperatures', '[', 'k', ']', '>=', 'pars', '[', 'min_key', ']', 'and', 'tail_checks_starting_temperatures', '[', 'k', ']', '<=', 'pars', '[', 'max_key', ']', ':', 'x_tail_check_for_SCAT', '.', 'append', '(', 'x_tail_check', '[', 'k', ']', ')', 'y_tail_check_for_SCAT', '.', 'append', '(', 'y_tail_check', '[', 'k', ']', ')', 'x_tail_check_for_SCAT', '=', 'np', '.', 'array', '(', 'x_tail_check_for_SCAT', ')', 'y_tail_check_for_SCAT', '=', 'np', '.', 'array', '(', 'y_tail_check_for_SCAT', ')', '# --------------------------------------------------------------', '# calculate the lines that define the scat box:', '# if threshold value for beta is not defined, then scat cannot be calculated (pass)', '# in this case, scat pass', 'if', 'beta_key', 'in', 'list', '(', 'accept', '.', 'keys', '(', ')', ')', 'and', 'accept', '[', 'beta_key', ']', '!=', '""', ':', 'b_beta_threshold', '=', 'float', '(', 'accept', '[', 'beta_key', ']', ')', 'b', '=', 'pars', '[', 'b_key', ']', '# best fit line', 'cm_x', '=', 'np', '.', 'mean', '(', 'np', '.', 'array', '(', 'x_Arai_segment', ')', ')', '# x center of mass', 'cm_y', '=', 'np', '.', 'mean', '(', 'np', '.', 'array', '(', 'y_Arai_segment', ')', ')', '# y center of mass', 'a', '=', 'cm_y', '-', 'b', '*', 'cm_x', '# lines with slope = slope +/- 2*(specimen_b_beta)', 'two_sigma_beta_threshold', '=', '2', '*', 'b_beta_threshold', 'two_sigma_slope_threshold', '=', 'abs', '(', 'two_sigma_beta_threshold', '*', 'b', ')', '# a line with a shallower slope (b + 2*beta*b) passing through the center of mass', '# y=a1+b1x', 'b1', '=', 'b', '+', 'two_sigma_slope_threshold', 'a1', '=', 'cm_y', '-', 'b1', '*', 'cm_x', '# bounding line with steeper slope (b - 2*beta*b) passing through the center of mass', '# y=a2+b2x', 'b2', '=', 'b', '-', 'two_sigma_slope_threshold', 'a2', '=', 'cm_y', '-', 'b2', '*', 'cm_x', "# lower bounding line of the 'beta box'", '# y=intercept1+slop1x', 'slop1', '=', 'old_div', '(', 'a1', ',', '(', '(', 'old_div', '(', 'a2', ',', 'b2', ')', ')', ')', ')', 'intercept1', '=', 'a1', "# higher bounding line of the 'beta box'", '# y=intercept2+slop2x', 'slop2', '=', 'old_div', '(', 'a2', ',', '(', '(', 'old_div', '(', 'a1', ',', 'b1', ')', ')', ')', ')', 'intercept2', '=', 'a2', 'pars', '[', "'specimen_scat_bounding_line_high'", ']', '=', '[', 'intercept2', ',', 'slop2', ']', 'pars', '[', "'specimen_scat_bounding_line_low'", ']', '=', '[', 'intercept1', ',', 'slop1', ']', '# --------------------------------------------------------------', "# check if the Arai data points are in the 'box'", '# the two bounding lines', 'ymin', '=', 'intercept1', '+', 'x_Arai_segment', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_Arai_segment', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_Arai_segment', '>', 'ymax', 'check_2', '=', 'y_Arai_segment', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', "# check if the pTRM checks data points are in the 'box'", 'if', 'len', '(', 'x_ptrm_check_for_SCAT', ')', '>', '0', ':', '# the two bounding lines', 'ymin', 
'=', 'intercept1', '+', 'x_ptrm_check_for_SCAT', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_ptrm_check_for_SCAT', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_ptrm_check_for_SCAT', '>', 'ymax', 'check_2', '=', 'y_ptrm_check_for_SCAT', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', "# check if the tail checks data points are in the 'box'", 'if', 'len', '(', 'x_tail_check_for_SCAT', ')', '>', '0', ':', '# the two bounding lines', 'ymin', '=', 'intercept1', '+', 'x_tail_check_for_SCAT', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_tail_check_for_SCAT', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_tail_check_for_SCAT', '>', 'ymax', 'check_2', '=', 'y_tail_check_for_SCAT', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', '# check if specimen_scat is PASS or FAIL:', 'if', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', 'or', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', 'or', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', ':', 'pars', '[', 'scat_key', ']', '=', "'f'", 'else', ':', 'pars', '[', 'scat_key', ']', '=', "'t'", 'return', 'pars', ',', '0']
calculate the paleointensity magic parameters make some definitions
['calculate', 'the', 'paleointensity', 'magic', 'parameters', 'make', 'some', 'definitions']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L2827-L3374
10
lowandrew/OLCTools
spadespipeline/vtyper.py
Vtyper.epcrparse
def epcrparse(self): """ Parse the ePCR text file outputs """ logging.info('Parsing ePCR results') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': if 'stx' in sample.general.datastore: # Initialise count - this allows for the population of vtyperresults with unique values uniquecount = 0 # This populates vtyperresults with the verotoxin subtypes toxinlist = [] if os.path.isfile(sample[self.analysistype].resultsfile): epcrresults = open(sample[self.analysistype].resultsfile, 'r') for result in epcrresults: # Only the lines without a # contain results if "#" not in result: uniquecount += 1 # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Push the name of the primer pair - stripped of anything after a _ to the dictionary if vttype not in toxinlist: toxinlist.append(vttype) # Create a string of the entries in list1 joined with ";" toxinstring = ";".join(sorted(toxinlist)) # Save the string to the metadata sample[self.analysistype].toxinprofile = toxinstring else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'
python
def epcrparse(self): """ Parse the ePCR text file outputs """ logging.info('Parsing ePCR results') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': if 'stx' in sample.general.datastore: # Initialise count - this allows for the population of vtyperresults with unique values uniquecount = 0 # This populates vtyperresults with the verotoxin subtypes toxinlist = [] if os.path.isfile(sample[self.analysistype].resultsfile): epcrresults = open(sample[self.analysistype].resultsfile, 'r') for result in epcrresults: # Only the lines without a # contain results if "#" not in result: uniquecount += 1 # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Push the name of the primer pair - stripped of anything after a _ to the dictionary if vttype not in toxinlist: toxinlist.append(vttype) # Create a string of the entries in list1 joined with ";" toxinstring = ";".join(sorted(toxinlist)) # Save the string to the metadata sample[self.analysistype].toxinprofile = toxinstring else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'
['def', 'epcrparse', '(', 'self', ')', ':', 'logging', '.', 'info', '(', "'Parsing ePCR results'", ')', 'for', 'sample', 'in', 'self', '.', 'metadata', ':', 'if', 'sample', '.', 'general', '.', 'bestassemblyfile', '!=', "'NA'", ':', 'if', "'stx'", 'in', 'sample', '.', 'general', '.', 'datastore', ':', '# Initialise count - this allows for the population of vtyperresults with unique values', 'uniquecount', '=', '0', '# This populates vtyperresults with the verotoxin subtypes', 'toxinlist', '=', '[', ']', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'resultsfile', ')', ':', 'epcrresults', '=', 'open', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'resultsfile', ',', "'r'", ')', 'for', 'result', 'in', 'epcrresults', ':', '# Only the lines without a # contain results', 'if', '"#"', 'not', 'in', 'result', ':', 'uniquecount', '+=', '1', '# Split on \\t', 'data', '=', 'result', '.', 'split', '(', "'\\t'", ')', '# The subtyping primer pair is the first entry on lines with results', 'vttype', '=', 'data', '[', '0', ']', '.', 'split', '(', "'_'", ')', '[', '0', ']', '# Push the name of the primer pair - stripped of anything after a _ to the dictionary', 'if', 'vttype', 'not', 'in', 'toxinlist', ':', 'toxinlist', '.', 'append', '(', 'vttype', ')', '# Create a string of the entries in list1 joined with ";"', 'toxinstring', '=', '";"', '.', 'join', '(', 'sorted', '(', 'toxinlist', ')', ')', '# Save the string to the metadata', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', 'toxinstring', 'else', ':', 'setattr', '(', 'sample', ',', 'self', '.', 'analysistype', ',', 'GenObject', '(', ')', ')', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', "'NA'", 'else', ':', 'setattr', '(', 'sample', ',', 'self', '.', 'analysistype', ',', 'GenObject', '(', ')', ')', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', "'NA'"]
Parse the ePCR text file outputs
['Parse', 'the', 'ePCR', 'text', 'file', 'outputs']
train
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/vtyper.py#L96-L131
11
atztogo/phonopy
phonopy/structure/spglib.py
get_pointgroup
def get_pointgroup(rotations): """Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m " """ _set_no_error() # (symbol, pointgroup_number, transformation_matrix) pointgroup = spg.pointgroup(np.array(rotations, dtype='intc', order='C')) _set_error_message() return pointgroup
python
def get_pointgroup(rotations): """Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m " """ _set_no_error() # (symbol, pointgroup_number, transformation_matrix) pointgroup = spg.pointgroup(np.array(rotations, dtype='intc', order='C')) _set_error_message() return pointgroup
['def', 'get_pointgroup', '(', 'rotations', ')', ':', '_set_no_error', '(', ')', '# (symbol, pointgroup_number, transformation_matrix)', 'pointgroup', '=', 'spg', '.', 'pointgroup', '(', 'np', '.', 'array', '(', 'rotations', ',', 'dtype', '=', "'intc'", ',', 'order', '=', "'C'", ')', ')', '_set_error_message', '(', ')', 'return', 'pointgroup']
Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m "
['Return', 'point', 'group', 'in', 'international', 'table', 'symbol', 'and', 'number', '.']
train
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/spglib.py#L301-L343
12
automl/HpBandSter
hpbandster/core/nameserver.py
NameServer.start
def start(self): """ starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port """ if self.host is None: if self.nic_name is None: self.host = 'localhost' else: self.host = nic_name_to_host(self.nic_name) uri, self.pyro_ns, _ = Pyro4.naming.startNS(host=self.host, port=self.port) self.host, self.port = self.pyro_ns.locationStr.split(':') self.port = int(self.port) thread = threading.Thread(target=self.pyro_ns.requestLoop, name='Pyro4 nameserver started by HpBandSter') thread.start() if not self.dir is None: os.makedirs(self.dir, exist_ok=True) self.conf_fn = os.path.join(self.dir, 'HPB_run_%s_pyro.pkl'%self.run_id) with open(self.conf_fn, 'wb') as fh: pickle.dump((self.host, self.port), fh) return(self.host, self.port)
python
def start(self): """ starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port """ if self.host is None: if self.nic_name is None: self.host = 'localhost' else: self.host = nic_name_to_host(self.nic_name) uri, self.pyro_ns, _ = Pyro4.naming.startNS(host=self.host, port=self.port) self.host, self.port = self.pyro_ns.locationStr.split(':') self.port = int(self.port) thread = threading.Thread(target=self.pyro_ns.requestLoop, name='Pyro4 nameserver started by HpBandSter') thread.start() if not self.dir is None: os.makedirs(self.dir, exist_ok=True) self.conf_fn = os.path.join(self.dir, 'HPB_run_%s_pyro.pkl'%self.run_id) with open(self.conf_fn, 'wb') as fh: pickle.dump((self.host, self.port), fh) return(self.host, self.port)
['def', 'start', '(', 'self', ')', ':', 'if', 'self', '.', 'host', 'is', 'None', ':', 'if', 'self', '.', 'nic_name', 'is', 'None', ':', 'self', '.', 'host', '=', "'localhost'", 'else', ':', 'self', '.', 'host', '=', 'nic_name_to_host', '(', 'self', '.', 'nic_name', ')', 'uri', ',', 'self', '.', 'pyro_ns', ',', '_', '=', 'Pyro4', '.', 'naming', '.', 'startNS', '(', 'host', '=', 'self', '.', 'host', ',', 'port', '=', 'self', '.', 'port', ')', 'self', '.', 'host', ',', 'self', '.', 'port', '=', 'self', '.', 'pyro_ns', '.', 'locationStr', '.', 'split', '(', "':'", ')', 'self', '.', 'port', '=', 'int', '(', 'self', '.', 'port', ')', 'thread', '=', 'threading', '.', 'Thread', '(', 'target', '=', 'self', '.', 'pyro_ns', '.', 'requestLoop', ',', 'name', '=', "'Pyro4 nameserver started by HpBandSter'", ')', 'thread', '.', 'start', '(', ')', 'if', 'not', 'self', '.', 'dir', 'is', 'None', ':', 'os', '.', 'makedirs', '(', 'self', '.', 'dir', ',', 'exist_ok', '=', 'True', ')', 'self', '.', 'conf_fn', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'dir', ',', "'HPB_run_%s_pyro.pkl'", '%', 'self', '.', 'run_id', ')', 'with', 'open', '(', 'self', '.', 'conf_fn', ',', "'wb'", ')', 'as', 'fh', ':', 'pickle', '.', 'dump', '(', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', ',', 'fh', ')', 'return', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')']
starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port
['starts', 'a', 'Pyro4', 'nameserver', 'in', 'a', 'separate', 'thread', 'Returns', '-------', 'tuple', '(', 'str', 'int', ')', ':', 'the', 'host', 'name', 'and', 'the', 'used', 'port']
train
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/nameserver.py#L48-L79
13
PixelwarStudio/PyTree
Tree/core.py
Tree.grow
def grow(self, times=1): """Let the tree grow. Args: times (integer): Indicate how many times the tree will grow. """ self.nodes.append([]) for n, node in enumerate(self.nodes[self.age]): if self.age == 0: p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent(self.age-1, n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length(self.age+1, i) self.nodes[self.age+1].append(node.make_new_node(length, tot_angle)) self.age += 1 if times > 1: self.grow(times-1)
python
def grow(self, times=1): """Let the tree grow. Args: times (integer): Indicate how many times the tree will grow. """ self.nodes.append([]) for n, node in enumerate(self.nodes[self.age]): if self.age == 0: p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent(self.age-1, n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length(self.age+1, i) self.nodes[self.age+1].append(node.make_new_node(length, tot_angle)) self.age += 1 if times > 1: self.grow(times-1)
['def', 'grow', '(', 'self', ',', 'times', '=', '1', ')', ':', 'self', '.', 'nodes', '.', 'append', '(', '[', ']', ')', 'for', 'n', ',', 'node', 'in', 'enumerate', '(', 'self', '.', 'nodes', '[', 'self', '.', 'age', ']', ')', ':', 'if', 'self', '.', 'age', '==', '0', ':', 'p_node', '=', 'Node', '(', 'self', '.', 'pos', '[', ':', '2', ']', ')', 'else', ':', 'p_node', '=', 'self', '.', '_get_node_parent', '(', 'self', '.', 'age', '-', '1', ',', 'n', ')', 'angle', '=', 'node', '.', 'get_node_angle', '(', 'p_node', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'comp', ')', ':', 'tot_angle', '=', 'self', '.', '__get_total_angle', '(', 'angle', ',', 'i', ')', 'length', '=', 'self', '.', '__get_total_length', '(', 'self', '.', 'age', '+', '1', ',', 'i', ')', 'self', '.', 'nodes', '[', 'self', '.', 'age', '+', '1', ']', '.', 'append', '(', 'node', '.', 'make_new_node', '(', 'length', ',', 'tot_angle', ')', ')', 'self', '.', 'age', '+=', '1', 'if', 'times', '>', '1', ':', 'self', '.', 'grow', '(', 'times', '-', '1', ')']
Let the tree grow. Args: times (integer): Indicate how many times the tree will grow.
['Let', 'the', 'tree', 'grow', '.']
train
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L167-L189
14
saltstack/salt
salt/modules/osquery.py
kernel_integrity
def kernel_integrity(attrs=None, where=None): ''' Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity ''' if __grains__['os_family'] in ['RedHat', 'Debian']: return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
python
def kernel_integrity(attrs=None, where=None): ''' Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity ''' if __grains__['os_family'] in ['RedHat', 'Debian']: return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
['def', 'kernel_integrity', '(', 'attrs', '=', 'None', ',', 'where', '=', 'None', ')', ':', 'if', '__grains__', '[', "'os_family'", ']', 'in', '[', "'RedHat'", ',', "'Debian'", ']', ':', 'return', '_osquery_cmd', '(', 'table', '=', "'kernel_integrity'", ',', 'attrs', '=', 'attrs', ',', 'where', '=', 'where', ')', 'return', '{', "'result'", ':', 'False', ',', "'comment'", ':', "'Only available on Red Hat or Debian based systems.'", '}']
Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity
['Return', 'kernel_integrity', 'information', 'from', 'osquery']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/osquery.py#L149-L161
15
rackerlabs/lambda-uploader
lambda_uploader/package.py
Package.virtualenv
def virtualenv(self, virtualenv): ''' Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped. ''' # If a boolean is passed then set the internal _skip_virtualenv flag if isinstance(virtualenv, bool): self._skip_virtualenv = virtualenv else: self._virtualenv = virtualenv if not os.path.isdir(self._virtualenv): raise Exception("virtualenv %s not found" % self._virtualenv) LOG.info("Using existing virtualenv at %s" % self._virtualenv) # use supplied virtualenv path self._pkg_venv = self._virtualenv self._skip_virtualenv = True
python
def virtualenv(self, virtualenv): ''' Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped. ''' # If a boolean is passed then set the internal _skip_virtualenv flag if isinstance(virtualenv, bool): self._skip_virtualenv = virtualenv else: self._virtualenv = virtualenv if not os.path.isdir(self._virtualenv): raise Exception("virtualenv %s not found" % self._virtualenv) LOG.info("Using existing virtualenv at %s" % self._virtualenv) # use supplied virtualenv path self._pkg_venv = self._virtualenv self._skip_virtualenv = True
['def', 'virtualenv', '(', 'self', ',', 'virtualenv', ')', ':', '# If a boolean is passed then set the internal _skip_virtualenv flag', 'if', 'isinstance', '(', 'virtualenv', ',', 'bool', ')', ':', 'self', '.', '_skip_virtualenv', '=', 'virtualenv', 'else', ':', 'self', '.', '_virtualenv', '=', 'virtualenv', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'self', '.', '_virtualenv', ')', ':', 'raise', 'Exception', '(', '"virtualenv %s not found"', '%', 'self', '.', '_virtualenv', ')', 'LOG', '.', 'info', '(', '"Using existing virtualenv at %s"', '%', 'self', '.', '_virtualenv', ')', '# use supplied virtualenv path', 'self', '.', '_pkg_venv', '=', 'self', '.', '_virtualenv', 'self', '.', '_skip_virtualenv', '=', 'True']
Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped.
['Sets', 'the', 'virtual', 'environment', 'for', 'the', 'lambda', 'package']
train
https://github.com/rackerlabs/lambda-uploader/blob/a5036e60d45d1a4fdc07df071f5b6e3b113388d4/lambda_uploader/package.py#L114-L133
16
agoragames/kairos
kairos/cassandra_backend.py
CassandraSet._insert_stmt
def _insert_stmt(self, name, value, timestamp, interval, config): '''Helper to generate the insert statement.''' # Calculate the TTL and abort if inserting into the past expire, ttl = config['expire'], config['ttl'](timestamp) if expire and not ttl: return None i_time = config['i_calc'].to_bucket(timestamp) if not config['coarse']: r_time = config['r_calc'].to_bucket(timestamp) else: r_time = -1 # TODO: figure out escaping rules of CQL stmt = '''INSERT INTO %s (name, interval, i_time, r_time, value) VALUES ('%s', '%s', %s, %s, %s)'''%(self._table, name, interval, i_time, r_time, value) expire = config['expire'] if ttl: stmt += " USING TTL %s"%(ttl) return stmt
python
def _insert_stmt(self, name, value, timestamp, interval, config): '''Helper to generate the insert statement.''' # Calculate the TTL and abort if inserting into the past expire, ttl = config['expire'], config['ttl'](timestamp) if expire and not ttl: return None i_time = config['i_calc'].to_bucket(timestamp) if not config['coarse']: r_time = config['r_calc'].to_bucket(timestamp) else: r_time = -1 # TODO: figure out escaping rules of CQL stmt = '''INSERT INTO %s (name, interval, i_time, r_time, value) VALUES ('%s', '%s', %s, %s, %s)'''%(self._table, name, interval, i_time, r_time, value) expire = config['expire'] if ttl: stmt += " USING TTL %s"%(ttl) return stmt
['def', '_insert_stmt', '(', 'self', ',', 'name', ',', 'value', ',', 'timestamp', ',', 'interval', ',', 'config', ')', ':', '# Calculate the TTL and abort if inserting into the past', 'expire', ',', 'ttl', '=', 'config', '[', "'expire'", ']', ',', 'config', '[', "'ttl'", ']', '(', 'timestamp', ')', 'if', 'expire', 'and', 'not', 'ttl', ':', 'return', 'None', 'i_time', '=', 'config', '[', "'i_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', 'if', 'not', 'config', '[', "'coarse'", ']', ':', 'r_time', '=', 'config', '[', "'r_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', 'else', ':', 'r_time', '=', '-', '1', '# TODO: figure out escaping rules of CQL', 'stmt', '=', "'''INSERT INTO %s (name, interval, i_time, r_time, value)\n VALUES ('%s', '%s', %s, %s, %s)'''", '%', '(', 'self', '.', '_table', ',', 'name', ',', 'interval', ',', 'i_time', ',', 'r_time', ',', 'value', ')', 'expire', '=', 'config', '[', "'expire'", ']', 'if', 'ttl', ':', 'stmt', '+=', '" USING TTL %s"', '%', '(', 'ttl', ')', 'return', 'stmt']
Helper to generate the insert statement.
['Helper', 'to', 'generate', 'the', 'insert', 'statement', '.']
train
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/cassandra_backend.py#L646-L665
17
pavelsof/ipalint
ipalint/read.py
Reader._determine_dialect
def _determine_dialect(self, lines): """ Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method. """ permuts = [(quotechar, escapechar) for quotechar in CSV_QUOTECHARS for escapechar in CSV_ESCAPECHARS] for delim in CSV_DELIMITERS: counts = [line.count(delim) for line in lines] if min(counts) == 0: continue for quotechar, escapechar in permuts: doublequote = True if escapechar is None else False reader = csv.reader(lines, delimiter=delim, quotechar=quotechar, doublequote=doublequote, escapechar=escapechar) try: assert len(set([len(line) for line in reader])) == 1 except AssertionError: continue else: break else: continue # no suitable quoting found break # found it! else: return None return Dialect(delim, quotechar, doublequote, escapechar)
python
def _determine_dialect(self, lines): """ Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method. """ permuts = [(quotechar, escapechar) for quotechar in CSV_QUOTECHARS for escapechar in CSV_ESCAPECHARS] for delim in CSV_DELIMITERS: counts = [line.count(delim) for line in lines] if min(counts) == 0: continue for quotechar, escapechar in permuts: doublequote = True if escapechar is None else False reader = csv.reader(lines, delimiter=delim, quotechar=quotechar, doublequote=doublequote, escapechar=escapechar) try: assert len(set([len(line) for line in reader])) == 1 except AssertionError: continue else: break else: continue # no suitable quoting found break # found it! else: return None return Dialect(delim, quotechar, doublequote, escapechar)
['def', '_determine_dialect', '(', 'self', ',', 'lines', ')', ':', 'permuts', '=', '[', '(', 'quotechar', ',', 'escapechar', ')', 'for', 'quotechar', 'in', 'CSV_QUOTECHARS', 'for', 'escapechar', 'in', 'CSV_ESCAPECHARS', ']', 'for', 'delim', 'in', 'CSV_DELIMITERS', ':', 'counts', '=', '[', 'line', '.', 'count', '(', 'delim', ')', 'for', 'line', 'in', 'lines', ']', 'if', 'min', '(', 'counts', ')', '==', '0', ':', 'continue', 'for', 'quotechar', ',', 'escapechar', 'in', 'permuts', ':', 'doublequote', '=', 'True', 'if', 'escapechar', 'is', 'None', 'else', 'False', 'reader', '=', 'csv', '.', 'reader', '(', 'lines', ',', 'delimiter', '=', 'delim', ',', 'quotechar', '=', 'quotechar', ',', 'doublequote', '=', 'doublequote', ',', 'escapechar', '=', 'escapechar', ')', 'try', ':', 'assert', 'len', '(', 'set', '(', '[', 'len', '(', 'line', ')', 'for', 'line', 'in', 'reader', ']', ')', ')', '==', '1', 'except', 'AssertionError', ':', 'continue', 'else', ':', 'break', 'else', ':', 'continue', '# no suitable quoting found', 'break', '# found it!', 'else', ':', 'return', 'None', 'return', 'Dialect', '(', 'delim', ',', 'quotechar', ',', 'doublequote', ',', 'escapechar', ')']
Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method.
['Expects', 'a', 'non', '-', 'empty', '[]', 'of', 'strings', ';', 'these', 'would', 'normally', 'be', 'the', 'first', 'few', 'lines', 'of', 'a', 'csv', 'file', '.', 'Returns', 'the', 'most', 'likely', 'Dialect', 'named', 'tuple', 'or', 'None', 'if', 'the', 'data', 'seems', 'to', 'form', 'a', 'single', 'column', '.']
train
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L177-L218
18
angr/angr
angr/utils/graph.py
compute_dominance_frontier
def compute_dominance_frontier(graph, domtree): """ Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier """ df = {} # Perform a post-order search on the dominator tree for x in networkx.dfs_postorder_nodes(domtree): if x not in graph: # Skip nodes that are not in the graph continue df[x] = set() # local set for y in graph.successors(x): if x not in domtree.predecessors(y): df[x].add(y) # up set if x is None: continue for z in domtree.successors(x): if z is x: continue if z not in df: continue for y in df[z]: if x not in list(domtree.predecessors(y)): df[x].add(y) return df
python
def compute_dominance_frontier(graph, domtree): """ Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier """ df = {} # Perform a post-order search on the dominator tree for x in networkx.dfs_postorder_nodes(domtree): if x not in graph: # Skip nodes that are not in the graph continue df[x] = set() # local set for y in graph.successors(x): if x not in domtree.predecessors(y): df[x].add(y) # up set if x is None: continue for z in domtree.successors(x): if z is x: continue if z not in df: continue for y in df[z]: if x not in list(domtree.predecessors(y)): df[x].add(y) return df
['def', 'compute_dominance_frontier', '(', 'graph', ',', 'domtree', ')', ':', 'df', '=', '{', '}', '# Perform a post-order search on the dominator tree', 'for', 'x', 'in', 'networkx', '.', 'dfs_postorder_nodes', '(', 'domtree', ')', ':', 'if', 'x', 'not', 'in', 'graph', ':', '# Skip nodes that are not in the graph', 'continue', 'df', '[', 'x', ']', '=', 'set', '(', ')', '# local set', 'for', 'y', 'in', 'graph', '.', 'successors', '(', 'x', ')', ':', 'if', 'x', 'not', 'in', 'domtree', '.', 'predecessors', '(', 'y', ')', ':', 'df', '[', 'x', ']', '.', 'add', '(', 'y', ')', '# up set', 'if', 'x', 'is', 'None', ':', 'continue', 'for', 'z', 'in', 'domtree', '.', 'successors', '(', 'x', ')', ':', 'if', 'z', 'is', 'x', ':', 'continue', 'if', 'z', 'not', 'in', 'df', ':', 'continue', 'for', 'y', 'in', 'df', '[', 'z', ']', ':', 'if', 'x', 'not', 'in', 'list', '(', 'domtree', '.', 'predecessors', '(', 'y', ')', ')', ':', 'df', '[', 'x', ']', '.', 'add', '(', 'y', ')', 'return', 'df']
Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier
['Compute', 'a', 'dominance', 'frontier', 'based', 'on', 'the', 'given', 'post', '-', 'dominator', 'tree', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/utils/graph.py#L63-L104
19
CI-WATER/mapkit
mapkit/RasterConverter.py
RasterConverter.isNumber
def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False
python
def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False
['def', 'isNumber', '(', 'self', ',', 'value', ')', ':', 'try', ':', 'str', '(', 'value', ')', 'float', '(', 'value', ')', 'return', 'True', 'except', 'ValueError', ':', 'return', 'False']
Validate whether a value is a number or not
['Validate', 'whether', 'a', 'value', 'is', 'a', 'number', 'or', 'not']
train
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterConverter.py#L1097-L1107
20
CamDavidsonPilon/lifelines
lifelines/utils/__init__.py
survival_table_from_events
def survival_table_from_events( death_times, event_observed, birth_times=None, columns=["removed", "observed", "censored", "entrance", "at_risk"], weights=None, collapse=False, intervals=None, ): # pylint: disable=dangerous-default-value,too-many-locals """ Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events """ removed, observed, censored, entrance, at_risk = columns death_times = np.asarray(death_times) if birth_times is None: birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0]) else: birth_times = np.asarray(birth_times) if np.any(birth_times > death_times): raise ValueError("birth time must be less than time of death.") if weights is None: weights = 1 # deal with deaths and censorships df = pd.DataFrame(death_times, columns=["event_at"]) df[removed] = np.asarray(weights) df[observed] = np.asarray(weights) * (np.asarray(event_observed).astype(bool)) death_table = df.groupby("event_at").sum() death_table[censored] = (death_table[removed] - death_table[observed]).astype(int) # deal with late births births = pd.DataFrame(birth_times, columns=["event_at"]) births[entrance] = np.asarray(weights) births_table = births.groupby("event_at").sum() event_table = death_table.join(births_table, how="outer", sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414 event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0) # group by intervals if (collapse) or (intervals is not None): event_table = _group_event_table_by_intervals(event_table, intervals) if (np.asarray(weights).astype(int) != weights).any(): return event_table.astype(float) return event_table.astype(int)
python
def survival_table_from_events( death_times, event_observed, birth_times=None, columns=["removed", "observed", "censored", "entrance", "at_risk"], weights=None, collapse=False, intervals=None, ): # pylint: disable=dangerous-default-value,too-many-locals """ Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events """ removed, observed, censored, entrance, at_risk = columns death_times = np.asarray(death_times) if birth_times is None: birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0]) else: birth_times = np.asarray(birth_times) if np.any(birth_times > death_times): raise ValueError("birth time must be less than time of death.") if weights is None: weights = 1 # deal with deaths and censorships df = pd.DataFrame(death_times, columns=["event_at"]) df[removed] = np.asarray(weights) df[observed] = np.asarray(weights) * (np.asarray(event_observed).astype(bool)) death_table = df.groupby("event_at").sum() death_table[censored] = (death_table[removed] - death_table[observed]).astype(int) # deal with late births births = pd.DataFrame(birth_times, columns=["event_at"]) births[entrance] = np.asarray(weights) births_table = births.groupby("event_at").sum() event_table = death_table.join(births_table, how="outer", sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414 event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0) # group by intervals if (collapse) or (intervals is not None): event_table = _group_event_table_by_intervals(event_table, intervals) if (np.asarray(weights).astype(int) != weights).any(): return event_table.astype(float) return event_table.astype(int)
['def', 'survival_table_from_events', '(', 'death_times', ',', 'event_observed', ',', 'birth_times', '=', 'None', ',', 'columns', '=', '[', '"removed"', ',', '"observed"', ',', '"censored"', ',', '"entrance"', ',', '"at_risk"', ']', ',', 'weights', '=', 'None', ',', 'collapse', '=', 'False', ',', 'intervals', '=', 'None', ',', ')', ':', '# pylint: disable=dangerous-default-value,too-many-locals', 'removed', ',', 'observed', ',', 'censored', ',', 'entrance', ',', 'at_risk', '=', 'columns', 'death_times', '=', 'np', '.', 'asarray', '(', 'death_times', ')', 'if', 'birth_times', 'is', 'None', ':', 'birth_times', '=', 'min', '(', '0', ',', 'death_times', '.', 'min', '(', ')', ')', '*', 'np', '.', 'ones', '(', 'death_times', '.', 'shape', '[', '0', ']', ')', 'else', ':', 'birth_times', '=', 'np', '.', 'asarray', '(', 'birth_times', ')', 'if', 'np', '.', 'any', '(', 'birth_times', '>', 'death_times', ')', ':', 'raise', 'ValueError', '(', '"birth time must be less than time of death."', ')', 'if', 'weights', 'is', 'None', ':', 'weights', '=', '1', '# deal with deaths and censorships', 'df', '=', 'pd', '.', 'DataFrame', '(', 'death_times', ',', 'columns', '=', '[', '"event_at"', ']', ')', 'df', '[', 'removed', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', 'df', '[', 'observed', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', '*', '(', 'np', '.', 'asarray', '(', 'event_observed', ')', '.', 'astype', '(', 'bool', ')', ')', 'death_table', '=', 'df', '.', 'groupby', '(', '"event_at"', ')', '.', 'sum', '(', ')', 'death_table', '[', 'censored', ']', '=', '(', 'death_table', '[', 'removed', ']', '-', 'death_table', '[', 'observed', ']', ')', '.', 'astype', '(', 'int', ')', '# deal with late births', 'births', '=', 'pd', '.', 'DataFrame', '(', 'birth_times', ',', 'columns', '=', '[', '"event_at"', ']', ')', 'births', '[', 'entrance', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', 'births_table', '=', 'births', '.', 'groupby', '(', '"event_at"', ')', '.', 'sum', '(', ')', 'event_table', '=', 'death_table', '.', 'join', '(', 'births_table', ',', 'how', '=', '"outer"', ',', 'sort', '=', 'True', ')', '.', 'fillna', '(', '0', ')', '# http://wesmckinney.com/blog/?p=414', 'event_table', '[', 'at_risk', ']', '=', 'event_table', '[', 'entrance', ']', '.', 'cumsum', '(', ')', '-', 'event_table', '[', 'removed', ']', '.', 'cumsum', '(', ')', '.', 'shift', '(', '1', ')', '.', 'fillna', '(', '0', ')', '# group by intervals', 'if', '(', 'collapse', ')', 'or', '(', 'intervals', 'is', 'not', 'None', ')', ':', 'event_table', '=', '_group_event_table_by_intervals', '(', 'event_table', ',', 'intervals', ')', 'if', '(', 'np', '.', 'asarray', '(', 'weights', ')', '.', 'astype', '(', 'int', ')', '!=', 'weights', ')', '.', 'any', '(', ')', ':', 'return', 'event_table', '.', 'astype', '(', 'float', ')', 'return', 'event_table', '.', 'astype', '(', 'int', ')']
Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events
['Parameters', '----------', 'death_times', ':', '(', 'n', ')', 'array', 'represent', 'the', 'event', 'times', 'event_observed', ':', '(', 'n', ')', 'array', '1', 'if', 'observed', 'event', '0', 'is', 'censored', 'event', '.', 'birth_times', ':', 'a', '(', 'n', ')', 'array', 'optional', 'representing', 'when', 'the', 'subject', 'was', 'first', 'observed', '.', 'A', 'subject', 's', 'death', 'event', 'is', 'then', 'at', '[', 'birth', 'times', '+', 'duration', 'observed', ']', '.', 'If', 'None', '(', 'default', ')', 'birth_times', 'are', 'set', 'to', 'be', 'the', 'first', 'observation', 'or', '0', 'which', 'ever', 'is', 'smaller', '.', 'columns', ':', 'iterable', 'optional', 'a', '3', '-', 'length', 'array', 'to', 'call', 'the', 'in', 'order', 'removed', 'individuals', 'observed', 'deaths', 'and', 'censorships', '.', 'weights', ':', '(', 'n', '1', ')', 'array', 'optional', 'Optional', 'argument', 'to', 'use', 'weights', 'for', 'individuals', '.', 'Assumes', 'weights', 'of', '1', 'if', 'not', 'provided', '.', 'collapse', ':', 'boolean', 'optional', '(', 'default', '=', 'False', ')', 'If', 'True', 'collapses', 'survival', 'table', 'into', 'lifetable', 'to', 'show', 'events', 'in', 'interval', 'bins', 'intervals', ':', 'iterable', 'optional', 'Default', 'None', 'otherwise', 'a', 'list', '/', '(', 'n', '1', ')', 'array', 'of', 'interval', 'edge', 'measures', '.', 'If', 'left', 'as', 'None', 'while', 'collapse', '=', 'True', 'then', 'Freedman', '-', 'Diaconis', 'rule', 'for', 'histogram', 'bins', 'will', 'be', 'used', 'to', 'determine', 'intervals', '.']
train
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/__init__.py#L262-L361
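The record above documents lifelines' survival_table_from_events. As a quick illustration of how such a record could be exercised, here is a minimal usage sketch; it assumes the lifelines package is installed and that the function is importable from lifelines.utils (the record's file path, lifelines/utils/__init__.py, suggests so). The toy durations below are made up.

# Minimal usage sketch (assumption: `pip install lifelines` and the import path below).
import numpy as np
from lifelines.utils import survival_table_from_events

durations = np.array([6, 7, 7, 9, 9, 9, 13, 13, 13, 15, 15])   # toy event times
observed = np.ones_like(durations)                              # 1 = event observed, 0 = censored

table = survival_table_from_events(durations, observed)
print(table)   # columns: removed, observed, censored, entrance, at_risk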
21
django-blog-zinnia/cmsplugin-zinnia
cmsplugin_zinnia/cms_plugins.py
CMSRandomEntriesPlugin.render
def render(self, context, instance, placeholder): """ Update the context with plugin's data """ context = super(CMSRandomEntriesPlugin, self).render( context, instance, placeholder) context['template_to_render'] = (str(instance.template_to_render) or 'zinnia/tags/entries_random.html') return context
python
def render(self, context, instance, placeholder): """ Update the context with plugin's data """ context = super(CMSRandomEntriesPlugin, self).render( context, instance, placeholder) context['template_to_render'] = (str(instance.template_to_render) or 'zinnia/tags/entries_random.html') return context
['def', 'render', '(', 'self', ',', 'context', ',', 'instance', ',', 'placeholder', ')', ':', 'context', '=', 'super', '(', 'CMSRandomEntriesPlugin', ',', 'self', ')', '.', 'render', '(', 'context', ',', 'instance', ',', 'placeholder', ')', 'context', '[', "'template_to_render'", ']', '=', '(', 'str', '(', 'instance', '.', 'template_to_render', ')', 'or', "'zinnia/tags/entries_random.html'", ')', 'return', 'context']
Update the context with plugin's data
['Update', 'the', 'context', 'with', 'plugin', 's', 'data']
train
https://github.com/django-blog-zinnia/cmsplugin-zinnia/blob/7613c0d9ae29affe9ab97527e4b6d5bef124afdc/cmsplugin_zinnia/cms_plugins.py#L131-L139
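The render method above follows a common pattern: call the parent plugin's render, then fill in a default template name. Note that the `str(...) or default` fallback only triggers when str(...) is an empty string. The sketch below re-creates the pattern in plain Python, without Django or django-cms; all class names here are illustrative.

# Standalone sketch of the "call parent render, then default a value" pattern.
class BasePlugin:
    def render(self, context, instance, placeholder):
        context.update({'instance': instance, 'placeholder': placeholder})
        return context

class RandomEntriesPlugin(BasePlugin):
    def render(self, context, instance, placeholder):
        context = super().render(context, instance, placeholder)
        # falls back only when str(instance.template_to_render) is an empty string
        context['template_to_render'] = (str(instance.template_to_render)
                                         or 'zinnia/tags/entries_random.html')
        return context

class FakeInstance:
    template_to_render = ''   # empty -> the default template is used

print(RandomEntriesPlugin().render({}, FakeInstance(), 'sidebar')['template_to_render'])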
22
rjw57/throw
throw/minus/minus.py
Gallery.SaveGallery
def SaveGallery(self, name=None, items=None): """Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change.""" url = 'http://min.us/api/SaveGallery' if not name: if not self.name: name = self.GetItems()[0] if self.name: name = self.name if not items: if not self.items: items = self.GetItems()[1] elif self.items: items = self.items params = {"name": name, "id":self.editor_id, "items":items} try: response = _dopost(url, params) except: pass else: self.name = name self.items = items
python
def SaveGallery(self, name=None, items=None): """Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change.""" url = 'http://min.us/api/SaveGallery' if not name: if not self.name: name = self.GetItems()[0] if self.name: name = self.name if not items: if not self.items: items = self.GetItems()[1] elif self.items: items = self.items params = {"name": name, "id":self.editor_id, "items":items} try: response = _dopost(url, params) except: pass else: self.name = name self.items = items
['def', 'SaveGallery', '(', 'self', ',', 'name', '=', 'None', ',', 'items', '=', 'None', ')', ':', 'url', '=', "'http://min.us/api/SaveGallery'", 'if', 'not', 'name', ':', 'if', 'not', 'self', '.', 'name', ':', 'name', '=', 'self', '.', 'GetItems', '(', ')', '[', '0', ']', 'if', 'self', '.', 'name', ':', 'name', '=', 'self', '.', 'name', 'if', 'not', 'items', ':', 'if', 'not', 'self', '.', 'items', ':', 'items', '=', 'self', '.', 'GetItems', '(', ')', '[', '1', ']', 'elif', 'self', '.', 'items', ':', 'items', '=', 'self', '.', 'items', 'params', '=', '{', '"name"', ':', 'name', ',', '"id"', ':', 'self', '.', 'editor_id', ',', '"items"', ':', 'items', '}', 'try', ':', 'response', '=', '_dopost', '(', 'url', ',', 'params', ')', 'except', ':', 'pass', 'else', ':', 'self', '.', 'name', '=', 'name', 'self', '.', 'items', '=', 'items']
Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change.
['Use', 'this', 'to', 'update', 'the', 'gallery', 'name', 'or', 'change', 'sort', 'order', '.', 'Specify', 'which', 'attribute', '(', 'name', 'or', 'items', 'or', 'both', ')', 'you', 'want', 'to', 'change', '.']
train
https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/minus/minus.py#L62-L88
23
letuananh/chirptext
chirptext/cli.py
setup_logging
def setup_logging(filename, log_dir=None, force_setup=False): ''' Try to load logging configuration from a file. Set level to INFO if failed. ''' if not force_setup and ChirpCLI.SETUP_COMPLETED: logging.debug("Master logging has been setup. This call will be ignored.") return if log_dir and not os.path.exists(log_dir): os.makedirs(log_dir) if os.path.isfile(filename): with open(filename) as config_file: try: config = json.load(config_file) logging.config.dictConfig(config) logging.info("logging was setup using {}".format(filename)) ChirpCLI.SETUP_COMPLETED = True except Exception as e: logging.exception("Could not load logging config") # default logging config logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.INFO)
python
def setup_logging(filename, log_dir=None, force_setup=False): ''' Try to load logging configuration from a file. Set level to INFO if failed. ''' if not force_setup and ChirpCLI.SETUP_COMPLETED: logging.debug("Master logging has been setup. This call will be ignored.") return if log_dir and not os.path.exists(log_dir): os.makedirs(log_dir) if os.path.isfile(filename): with open(filename) as config_file: try: config = json.load(config_file) logging.config.dictConfig(config) logging.info("logging was setup using {}".format(filename)) ChirpCLI.SETUP_COMPLETED = True except Exception as e: logging.exception("Could not load logging config") # default logging config logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.INFO)
['def', 'setup_logging', '(', 'filename', ',', 'log_dir', '=', 'None', ',', 'force_setup', '=', 'False', ')', ':', 'if', 'not', 'force_setup', 'and', 'ChirpCLI', '.', 'SETUP_COMPLETED', ':', 'logging', '.', 'debug', '(', '"Master logging has been setup. This call will be ignored."', ')', 'return', 'if', 'log_dir', 'and', 'not', 'os', '.', 'path', '.', 'exists', '(', 'log_dir', ')', ':', 'os', '.', 'makedirs', '(', 'log_dir', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'with', 'open', '(', 'filename', ')', 'as', 'config_file', ':', 'try', ':', 'config', '=', 'json', '.', 'load', '(', 'config_file', ')', 'logging', '.', 'config', '.', 'dictConfig', '(', 'config', ')', 'logging', '.', 'info', '(', '"logging was setup using {}"', '.', 'format', '(', 'filename', ')', ')', 'ChirpCLI', '.', 'SETUP_COMPLETED', '=', 'True', 'except', 'Exception', 'as', 'e', ':', 'logging', '.', 'exception', '(', '"Could not load logging config"', ')', '# default logging config', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')', 'else', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')']
Try to load logging configuration from a file. Set level to INFO if failed.
['Try', 'to', 'load', 'logging', 'configuration', 'from', 'a', 'file', '.', 'Set', 'level', 'to', 'INFO', 'if', 'failed', '.']
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/cli.py#L35-L55
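The setup_logging record shows a "configure from a JSON file, fall back to basicConfig" approach. Here is a standard-library-only sketch of the same idea; it does not use chirptext itself, and the file name passed at the end is a placeholder.

import json
import logging
import logging.config
import os

def setup_logging(filename):
    # Try to configure logging from a JSON dictConfig file; fall back to INFO level.
    if os.path.isfile(filename):
        try:
            with open(filename) as config_file:
                logging.config.dictConfig(json.load(config_file))
            logging.info("logging was setup using %s", filename)
            return
        except Exception:
            logging.exception("Could not load logging config")
    logging.basicConfig(level=logging.INFO)

setup_logging("logging.json")   # placeholder path; falls back to basicConfig if absent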
24
doconix/django-mako-plus
django_mako_plus/template/adapter.py
MakoTemplateAdapter.name
def name(self): '''Returns the name of this template (if created from a file) or "string" if not''' if self.mako_template.filename: return os.path.basename(self.mako_template.filename) return 'string'
python
def name(self): '''Returns the name of this template (if created from a file) or "string" if not''' if self.mako_template.filename: return os.path.basename(self.mako_template.filename) return 'string'
['def', 'name', '(', 'self', ')', ':', 'if', 'self', '.', 'mako_template', '.', 'filename', ':', 'return', 'os', '.', 'path', '.', 'basename', '(', 'self', '.', 'mako_template', '.', 'filename', ')', 'return', "'string'"]
Returns the name of this template (if created from a file) or "string" if not
['Returns', 'the', 'name', 'of', 'this', 'template', '(', 'if', 'created', 'from', 'a', 'file', ')', 'or', 'string', 'if', 'not']
train
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/template/adapter.py#L39-L43
25
HazyResearch/fonduer
src/fonduer/learning/disc_models/sparse_lstm.py
SparseLSTM.forward
def forward(self, X): """Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes) """ s = X[:-2] f = X[-2] w = X[-1] batch_size = len(f) # Generate lstm weight indices x_idx = self._cuda( torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat( batch_size, 1 ) ) outputs = self._cuda(torch.Tensor([])) # Calculate textual features from LSTMs for i in range(len(s)): state_word = self.lstms[0].init_hidden(batch_size) output = self.lstms[0].forward(s[i][0], s[i][1], state_word) outputs = torch.cat((outputs, output), 1) # Concatenate textual features with multi-modal features feaures = torch.cat((x_idx, f), 1) weights = torch.cat((outputs, w), 1) return self.sparse_linear(feaures, weights)
python
def forward(self, X): """Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes) """ s = X[:-2] f = X[-2] w = X[-1] batch_size = len(f) # Generate lstm weight indices x_idx = self._cuda( torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat( batch_size, 1 ) ) outputs = self._cuda(torch.Tensor([])) # Calculate textual features from LSTMs for i in range(len(s)): state_word = self.lstms[0].init_hidden(batch_size) output = self.lstms[0].forward(s[i][0], s[i][1], state_word) outputs = torch.cat((outputs, output), 1) # Concatenate textual features with multi-modal features feaures = torch.cat((x_idx, f), 1) weights = torch.cat((outputs, w), 1) return self.sparse_linear(feaures, weights)
['def', 'forward', '(', 'self', ',', 'X', ')', ':', 's', '=', 'X', '[', ':', '-', '2', ']', 'f', '=', 'X', '[', '-', '2', ']', 'w', '=', 'X', '[', '-', '1', ']', 'batch_size', '=', 'len', '(', 'f', ')', '# Generate lstm weight indices', 'x_idx', '=', 'self', '.', '_cuda', '(', 'torch', '.', 'as_tensor', '(', 'np', '.', 'arange', '(', '1', ',', 'self', '.', 'settings', '[', '"lstm_dim"', ']', '+', '1', ')', ')', '.', 'repeat', '(', 'batch_size', ',', '1', ')', ')', 'outputs', '=', 'self', '.', '_cuda', '(', 'torch', '.', 'Tensor', '(', '[', ']', ')', ')', '# Calculate textual features from LSTMs', 'for', 'i', 'in', 'range', '(', 'len', '(', 's', ')', ')', ':', 'state_word', '=', 'self', '.', 'lstms', '[', '0', ']', '.', 'init_hidden', '(', 'batch_size', ')', 'output', '=', 'self', '.', 'lstms', '[', '0', ']', '.', 'forward', '(', 's', '[', 'i', ']', '[', '0', ']', ',', 's', '[', 'i', ']', '[', '1', ']', ',', 'state_word', ')', 'outputs', '=', 'torch', '.', 'cat', '(', '(', 'outputs', ',', 'output', ')', ',', '1', ')', '# Concatenate textual features with multi-modal features', 'feaures', '=', 'torch', '.', 'cat', '(', '(', 'x_idx', ',', 'f', ')', ',', '1', ')', 'weights', '=', 'torch', '.', 'cat', '(', '(', 'outputs', ',', 'w', ')', ',', '1', ')', 'return', 'self', '.', 'sparse_linear', '(', 'feaures', ',', 'weights', ')']
Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes)
['Forward', 'function', '.']
train
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/sparse_lstm.py#L25-L64
26
google/grr
grr/server/grr_response_server/databases/mysql_flows.py
MySQLDBFlowMixin.CountFlowResultsByType
def CountFlowResultsByType(self, client_id, flow_id, cursor=None): """Returns counts of flow results grouped by result type.""" query = ("SELECT type, COUNT(*) FROM flow_results " "FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) " "WHERE client_id = %s AND flow_id = %s " "GROUP BY type") args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)] cursor.execute(query, args) return dict(cursor.fetchall())
python
def CountFlowResultsByType(self, client_id, flow_id, cursor=None): """Returns counts of flow results grouped by result type.""" query = ("SELECT type, COUNT(*) FROM flow_results " "FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) " "WHERE client_id = %s AND flow_id = %s " "GROUP BY type") args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)] cursor.execute(query, args) return dict(cursor.fetchall())
['def', 'CountFlowResultsByType', '(', 'self', ',', 'client_id', ',', 'flow_id', ',', 'cursor', '=', 'None', ')', ':', 'query', '=', '(', '"SELECT type, COUNT(*) FROM flow_results "', '"FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) "', '"WHERE client_id = %s AND flow_id = %s "', '"GROUP BY type"', ')', 'args', '=', '[', 'db_utils', '.', 'ClientIDToInt', '(', 'client_id', ')', ',', 'db_utils', '.', 'FlowIDToInt', '(', 'flow_id', ')', ']', 'cursor', '.', 'execute', '(', 'query', ',', 'args', ')', 'return', 'dict', '(', 'cursor', '.', 'fetchall', '(', ')', ')']
Returns counts of flow results grouped by result type.
['Returns', 'counts', 'of', 'flow', 'results', 'grouped', 'by', 'result', 'type', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L1350-L1360
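CountFlowResultsByType boils down to a grouped COUNT(*) keyed by result type. The sketch below reproduces that query shape with sqlite3 from the standard library; the table layout and rows are invented, and the MySQL-specific FORCE INDEX hint is dropped.

# Standalone sketch of "counts grouped by type" with sqlite3; data is made up.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE flow_results (client_id INTEGER, flow_id INTEGER, type TEXT)")
conn.executemany(
    "INSERT INTO flow_results VALUES (?, ?, ?)",
    [(1, 10, "StatEntry"), (1, 10, "StatEntry"), (1, 10, "FileFinderResult")],
)
rows = conn.execute(
    "SELECT type, COUNT(*) FROM flow_results WHERE client_id = ? AND flow_id = ? GROUP BY type",
    (1, 10),
).fetchall()
print(dict(rows))   # counts per result type, e.g. {'StatEntry': 2, 'FileFinderResult': 1}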
27
google/grr
grr/client/grr_response_client/client_actions/artifact_collector.py
ArtifactCollector._ProcessGrepSource
def _ProcessGrepSource(self, source): """Find files fulfilling regex conditions.""" attributes = source.base_source.attributes paths = artifact_utils.InterpolateListKbAttributes( attributes["paths"], self.knowledge_base, self.ignore_interpolation_errors) regex = utils.RegexListDisjunction(attributes["content_regex_list"]) condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch( regex=regex, mode="ALL_HITS") file_finder_action = rdf_file_finder.FileFinderAction.Stat() request = rdf_file_finder.FileFinderArgs( paths=paths, action=file_finder_action, conditions=[condition], follow_links=True) action = file_finder.FileFinderOSFromClient yield action, request
python
def _ProcessGrepSource(self, source): """Find files fulfilling regex conditions.""" attributes = source.base_source.attributes paths = artifact_utils.InterpolateListKbAttributes( attributes["paths"], self.knowledge_base, self.ignore_interpolation_errors) regex = utils.RegexListDisjunction(attributes["content_regex_list"]) condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch( regex=regex, mode="ALL_HITS") file_finder_action = rdf_file_finder.FileFinderAction.Stat() request = rdf_file_finder.FileFinderArgs( paths=paths, action=file_finder_action, conditions=[condition], follow_links=True) action = file_finder.FileFinderOSFromClient yield action, request
['def', '_ProcessGrepSource', '(', 'self', ',', 'source', ')', ':', 'attributes', '=', 'source', '.', 'base_source', '.', 'attributes', 'paths', '=', 'artifact_utils', '.', 'InterpolateListKbAttributes', '(', 'attributes', '[', '"paths"', ']', ',', 'self', '.', 'knowledge_base', ',', 'self', '.', 'ignore_interpolation_errors', ')', 'regex', '=', 'utils', '.', 'RegexListDisjunction', '(', 'attributes', '[', '"content_regex_list"', ']', ')', 'condition', '=', 'rdf_file_finder', '.', 'FileFinderCondition', '.', 'ContentsRegexMatch', '(', 'regex', '=', 'regex', ',', 'mode', '=', '"ALL_HITS"', ')', 'file_finder_action', '=', 'rdf_file_finder', '.', 'FileFinderAction', '.', 'Stat', '(', ')', 'request', '=', 'rdf_file_finder', '.', 'FileFinderArgs', '(', 'paths', '=', 'paths', ',', 'action', '=', 'file_finder_action', ',', 'conditions', '=', '[', 'condition', ']', ',', 'follow_links', '=', 'True', ')', 'action', '=', 'file_finder', '.', 'FileFinderOSFromClient', 'yield', 'action', ',', 'request']
Find files fulfilling regex conditions.
['Find', 'files', 'fulfilling', 'regex', 'conditions', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/artifact_collector.py#L208-L225
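_ProcessGrepSource combines several content regexes into one disjunction and turns them into a file-finder condition. The sketch below shows the underlying idea with only the standard library; the way the patterns are joined is an approximation of a regex disjunction, not GRR's utils.RegexListDisjunction, and the sample path may not exist on your machine.

import re

def regex_disjunction(patterns):
    # one pattern that matches if any of the input byte patterns matches
    return re.compile(b"(" + b")|(".join(patterns) + b")")

def grep_files(paths, patterns):
    combined = regex_disjunction(patterns)
    for path in paths:
        try:
            with open(path, "rb") as handle:
                if combined.search(handle.read()):
                    yield path
        except OSError:
            continue   # unreadable or missing files are skipped

print(list(grep_files(["/etc/hostname"], [rb"\w+"])))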
28
F5Networks/f5-common-python
f5/multi_device/cluster/__init__.py
ClusterManager.manage_extant
def manage_extant(self, **kwargs): '''Manage an existing cluster :param kwargs: dict -- keyword args in dict ''' self._check_device_number(kwargs['devices']) self.trust_domain = TrustDomain( devices=kwargs['devices'], partition=kwargs['device_group_partition'] ) self.device_group = DeviceGroup(**kwargs) self.cluster = Cluster(**kwargs)
python
def manage_extant(self, **kwargs): '''Manage an existing cluster :param kwargs: dict -- keyword args in dict ''' self._check_device_number(kwargs['devices']) self.trust_domain = TrustDomain( devices=kwargs['devices'], partition=kwargs['device_group_partition'] ) self.device_group = DeviceGroup(**kwargs) self.cluster = Cluster(**kwargs)
['def', 'manage_extant', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', '_check_device_number', '(', 'kwargs', '[', "'devices'", ']', ')', 'self', '.', 'trust_domain', '=', 'TrustDomain', '(', 'devices', '=', 'kwargs', '[', "'devices'", ']', ',', 'partition', '=', 'kwargs', '[', "'device_group_partition'", ']', ')', 'self', '.', 'device_group', '=', 'DeviceGroup', '(', '*', '*', 'kwargs', ')', 'self', '.', 'cluster', '=', 'Cluster', '(', '*', '*', 'kwargs', ')']
Manage an existing cluster :param kwargs: dict -- keyword args in dict
['Manage', 'an', 'existing', 'cluster']
train
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/multi_device/cluster/__init__.py#L136-L148
29
google/python-gflags
gflags/flagvalues.py
FlagValues._GetFlagsDefinedByModule
def _GetFlagsDefinedByModule(self, module): """Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ return list(self.FlagsByModuleDict().get(module, []))
python
def _GetFlagsDefinedByModule(self, module): """Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ return list(self.FlagsByModuleDict().get(module, []))
['def', '_GetFlagsDefinedByModule', '(', 'self', ',', 'module', ')', ':', 'if', 'not', 'isinstance', '(', 'module', ',', 'str', ')', ':', 'module', '=', 'module', '.', '__name__', 'return', 'list', '(', 'self', '.', 'FlagsByModuleDict', '(', ')', '.', 'get', '(', 'module', ',', '[', ']', ')', ')']
Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object.
['Returns', 'the', 'list', 'of', 'flags', 'defined', 'by', 'a', 'module', '.']
train
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L265-L279
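_GetFlagsDefinedByModule accepts either a module object or its name and returns a defensive copy of the registry entry. A stripped-down sketch of that normalisation, outside gflags and with a made-up registry:

import os

flags_by_module = {"os": ["flag_a", "flag_b"]}   # invented registry for illustration

def flags_defined_by_module(module):
    if not isinstance(module, str):
        module = module.__name__
    # return a copy so callers cannot mutate the registry
    return list(flags_by_module.get(module, []))

print(flags_defined_by_module(os))     # ['flag_a', 'flag_b']
print(flags_defined_by_module("os"))   # same result when looked up by name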
30
openp2pdesign/makerlabs
makerlabs/hackaday_io.py
get_labs
def get_labs(format): """Gets Hackerspaces data from hackaday.io.""" hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url) hackerspaces = {} # Load all the Hackerspaces for i in hackerspaces_json: current_lab = Hackerspace() current_lab.id = i["id"] current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id current_lab.name = i["name"] if len(i["description"]) != 0: current_lab.description = i["description"] elif len(i["summary"]) != 0: current_lab.description = i["summary"] current_lab.created_at = i["moments"]["exact"] # Check if there are coordinates if i["latlon"] is not None: latlon = json.loads(i["latlon"]) current_lab.latitude = latlon["lat"] current_lab.longitude = latlon["lng"] # Get country, county and city from them country = geolocator.reverse( [latlon["lat"], latlon["lng"]]) current_lab.country = country.raw[ "address"]["country"] current_lab.address = country.raw["display_name"] current_lab.address_1 = country.raw["display_name"] current_lab.country_code = country.raw[ "address"]["country_code"] current_lab.county = country.raw[ "address"]["state_district"] current_lab.city = country.raw[ "address"]["city"] current_lab.postal_code = country.raw[ "address"]["postcode"] else: # For labs without a location or coordinates # add 0,0 as coordinates current_lab.latitude = 0.0 current_lab.longitude = 0.0 # Add the lab hackerspaces[i["name"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in hackerspaces: single = hackerspaces[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = hackerspaces # Default: return an oject else: output = hackerspaces # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
python
def get_labs(format): """Gets Hackerspaces data from hackaday.io.""" hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url) hackerspaces = {} # Load all the Hackerspaces for i in hackerspaces_json: current_lab = Hackerspace() current_lab.id = i["id"] current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id current_lab.name = i["name"] if len(i["description"]) != 0: current_lab.description = i["description"] elif len(i["summary"]) != 0: current_lab.description = i["summary"] current_lab.created_at = i["moments"]["exact"] # Check if there are coordinates if i["latlon"] is not None: latlon = json.loads(i["latlon"]) current_lab.latitude = latlon["lat"] current_lab.longitude = latlon["lng"] # Get country, county and city from them country = geolocator.reverse( [latlon["lat"], latlon["lng"]]) current_lab.country = country.raw[ "address"]["country"] current_lab.address = country.raw["display_name"] current_lab.address_1 = country.raw["display_name"] current_lab.country_code = country.raw[ "address"]["country_code"] current_lab.county = country.raw[ "address"]["state_district"] current_lab.city = country.raw[ "address"]["city"] current_lab.postal_code = country.raw[ "address"]["postcode"] else: # For labs without a location or coordinates # add 0,0 as coordinates current_lab.latitude = 0.0 current_lab.longitude = 0.0 # Add the lab hackerspaces[i["name"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in hackerspaces: single = hackerspaces[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = hackerspaces # Default: return an oject else: output = hackerspaces # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
['def', 'get_labs', '(', 'format', ')', ':', 'hackerspaces_json', '=', 'data_from_hackaday_io', '(', 'hackaday_io_labs_map_url', ')', 'hackerspaces', '=', '{', '}', '# Load all the Hackerspaces', 'for', 'i', 'in', 'hackerspaces_json', ':', 'current_lab', '=', 'Hackerspace', '(', ')', 'current_lab', '.', 'id', '=', 'i', '[', '"id"', ']', 'current_lab', '.', 'url', '=', '"https://hackaday.io/hackerspace/"', '+', 'current_lab', '.', 'id', 'current_lab', '.', 'name', '=', 'i', '[', '"name"', ']', 'if', 'len', '(', 'i', '[', '"description"', ']', ')', '!=', '0', ':', 'current_lab', '.', 'description', '=', 'i', '[', '"description"', ']', 'elif', 'len', '(', 'i', '[', '"summary"', ']', ')', '!=', '0', ':', 'current_lab', '.', 'description', '=', 'i', '[', '"summary"', ']', 'current_lab', '.', 'created_at', '=', 'i', '[', '"moments"', ']', '[', '"exact"', ']', '# Check if there are coordinates', 'if', 'i', '[', '"latlon"', ']', 'is', 'not', 'None', ':', 'latlon', '=', 'json', '.', 'loads', '(', 'i', '[', '"latlon"', ']', ')', 'current_lab', '.', 'latitude', '=', 'latlon', '[', '"lat"', ']', 'current_lab', '.', 'longitude', '=', 'latlon', '[', '"lng"', ']', '# Get country, county and city from them', 'country', '=', 'geolocator', '.', 'reverse', '(', '[', 'latlon', '[', '"lat"', ']', ',', 'latlon', '[', '"lng"', ']', ']', ')', 'current_lab', '.', 'country', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"country"', ']', 'current_lab', '.', 'address', '=', 'country', '.', 'raw', '[', '"display_name"', ']', 'current_lab', '.', 'address_1', '=', 'country', '.', 'raw', '[', '"display_name"', ']', 'current_lab', '.', 'country_code', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"country_code"', ']', 'current_lab', '.', 'county', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"state_district"', ']', 'current_lab', '.', 'city', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"city"', ']', 'current_lab', '.', 'postal_code', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"postcode"', ']', 'else', ':', '# For labs without a location or coordinates', '# add 0,0 as coordinates', 'current_lab', '.', 'latitude', '=', '0.0', 'current_lab', '.', 'longitude', '=', '0.0', '# Add the lab', 'hackerspaces', '[', 'i', '[', '"name"', ']', ']', '=', 'current_lab', '# Return a dictiornary / json', 'if', 'format', '.', 'lower', '(', ')', '==', '"dict"', 'or', 'format', '.', 'lower', '(', ')', '==', '"json"', ':', 'output', '=', '{', '}', 'for', 'j', 'in', 'hackerspaces', ':', 'output', '[', 'j', ']', '=', 'hackerspaces', '[', 'j', ']', '.', '__dict__', '# Return a geojson', 'elif', 'format', '.', 'lower', '(', ')', '==', '"geojson"', 'or', 'format', '.', 'lower', '(', ')', '==', '"geo"', ':', 'labs_list', '=', '[', ']', 'for', 'l', 'in', 'hackerspaces', ':', 'single', '=', 'hackerspaces', '[', 'l', ']', '.', '__dict__', 'single_lab', '=', 'Feature', '(', 'type', '=', '"Feature"', ',', 'geometry', '=', 'Point', '(', '(', 'single', '[', '"latitude"', ']', ',', 'single', '[', '"longitude"', ']', ')', ')', ',', 'properties', '=', 'single', ')', 'labs_list', '.', 'append', '(', 'single_lab', ')', 'output', '=', 'dumps', '(', 'FeatureCollection', '(', 'labs_list', ')', ')', '# Return a Pandas DataFrame', 'elif', 'format', '.', 'lower', '(', ')', '==', '"pandas"', 'or', 'format', '.', 'lower', '(', ')', '==', '"dataframe"', ':', 'output', '=', '{', '}', 'for', 'j', 'in', 'hackerspaces', ':', 'output', '[', 'j', ']', '=', 'hackerspaces', '[', 'j', ']', '.', '__dict__', '# Transform the dict into a Pandas DataFrame', 'output', '=', 'pd', '.', 'DataFrame', '.', 'from_dict', '(', 'output', ')', 'output', '=', 'output', '.', 'transpose', '(', ')', '# Return an object', 'elif', 'format', '.', 'lower', '(', ')', '==', '"object"', 'or', 'format', '.', 'lower', '(', ')', '==', '"obj"', ':', 'output', '=', 'hackerspaces', '# Default: return an oject', 'else', ':', 'output', '=', 'hackerspaces', '# Return a proper json', 'if', 'format', '.', 'lower', '(', ')', '==', '"json"', ':', 'output', '=', 'json', '.', 'dumps', '(', 'output', ')', 'return', 'output']
Gets Hackerspaces data from hackaday.io.
['Gets', 'Hackerspaces', 'data', 'from', 'hackaday', '.', 'io', '.']
train
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/hackaday_io.py#L57-L137
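get_labs converts the same dict of lab objects into several output formats. The pandas branch is the least obvious one; here is a standalone sketch of it with invented lab data (it assumes pandas is installed).

# Standalone sketch: dict of per-lab attribute dicts -> one DataFrame row per lab.
import pandas as pd

labs = {
    "Lab A": {"latitude": 52.5, "longitude": 13.4, "country_code": "de"},
    "Lab B": {"latitude": 0.0, "longitude": 0.0, "country_code": ""},
}
df = pd.DataFrame.from_dict(labs)   # labs become columns...
df = df.transpose()                 # ...then rows, one per lab
print(df)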
31
Azure/blobxfer
cli/cli.py
upload
def upload(ctx): """Upload files to Azure Storage""" settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload) ctx.initialize(settings.TransferAction.Upload) specs = settings.create_upload_specifications( ctx.cli_options, ctx.config) del ctx.cli_options for spec in specs: blobxfer.api.Uploader( ctx.general_options, ctx.credentials, spec ).start()
python
def upload(ctx): """Upload files to Azure Storage""" settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload) ctx.initialize(settings.TransferAction.Upload) specs = settings.create_upload_specifications( ctx.cli_options, ctx.config) del ctx.cli_options for spec in specs: blobxfer.api.Uploader( ctx.general_options, ctx.credentials, spec ).start()
['def', 'upload', '(', 'ctx', ')', ':', 'settings', '.', 'add_cli_options', '(', 'ctx', '.', 'cli_options', ',', 'settings', '.', 'TransferAction', '.', 'Upload', ')', 'ctx', '.', 'initialize', '(', 'settings', '.', 'TransferAction', '.', 'Upload', ')', 'specs', '=', 'settings', '.', 'create_upload_specifications', '(', 'ctx', '.', 'cli_options', ',', 'ctx', '.', 'config', ')', 'del', 'ctx', '.', 'cli_options', 'for', 'spec', 'in', 'specs', ':', 'blobxfer', '.', 'api', '.', 'Uploader', '(', 'ctx', '.', 'general_options', ',', 'ctx', '.', 'credentials', ',', 'spec', ')', '.', 'start', '(', ')']
Upload files to Azure Storage
['Upload', 'files', 'to', 'Azure', 'Storage']
train
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/cli/cli.py#L1106-L1116
32
mitsei/dlkit
dlkit/json_/resource/sessions.py
ResourceBinAssignmentSession.get_assignable_bin_ids
def get_assignable_bin_ids(self, bin_id): """Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy) bins = lookup_session.get_bins() id_list = [] for bin in bins: id_list.append(bin.get_id()) return IdList(id_list)
python
def get_assignable_bin_ids(self, bin_id): """Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy) bins = lookup_session.get_bins() id_list = [] for bin in bins: id_list.append(bin.get_id()) return IdList(id_list)
['def', 'get_assignable_bin_ids', '(', 'self', ',', 'bin_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids', '# This will likely be overridden by an authorization adapter', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'RESOURCE'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_bin_lookup_session', '(', 'proxy', '=', 'self', '.', '_proxy', ')', 'bins', '=', 'lookup_session', '.', 'get_bins', '(', ')', 'id_list', '=', '[', ']', 'for', 'bin', 'in', 'bins', ':', 'id_list', '.', 'append', '(', 'bin', '.', 'get_id', '(', ')', ')', 'return', 'IdList', '(', 'id_list', ')']
Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
['Gets', 'a', 'list', 'of', 'bins', 'including', 'and', 'under', 'the', 'given', 'bin', 'node', 'in', 'which', 'any', 'resource', 'can', 'be', 'assigned', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L1562-L1581
33
sbusard/wagoner
wagoner/table.py
Table.check
def check(self): """ Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. """ for character, followers in self.items(): for follower in followers: if follower not in self: return False return True
python
def check(self): """ Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. """ for character, followers in self.items(): for follower in followers: if follower not in self: return False return True
['def', 'check', '(', 'self', ')', ':', 'for', 'character', ',', 'followers', 'in', 'self', '.', 'items', '(', ')', ':', 'for', 'follower', 'in', 'followers', ':', 'if', 'follower', 'not', 'in', 'self', ':', 'return', 'False', 'return', 'True']
Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise.
['Check', 'that', 'this', 'table', 'is', 'complete', 'that', 'is', 'every', 'character', 'of', 'this', 'table', 'can', 'be', 'followed', 'by', 'a', 'new', 'character', '.']
train
https://github.com/sbusard/wagoner/blob/7f83d66bbd0e009e4d4232ffdf319bd5a2a5683b/wagoner/table.py#L81-L92
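Table.check verifies that every follower recorded in the table is itself a key of the table. The same check can be written as a single all(...) expression; the sketch below does that over a plain dict rather than the project's Table class.

def is_complete(table):
    # every follower listed anywhere must itself have an entry in the table
    return all(follower in table
               for followers in table.values()
               for follower in followers)

print(is_complete({"a": {"b"}, "b": {"a"}}))   # True
print(is_complete({"a": {"b"}, "b": {"c"}}))   # False: 'c' has no followers entry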
34
DataDog/integrations-core
openstack/datadog_checks/openstack/openstack.py
KeystoneCatalog.get_neutron_endpoint
def get_neutron_endpoint(cls, json_resp): """ Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service Sends a CRITICAL service check when none are found registered in the Catalog """ catalog = json_resp.get('token', {}).get('catalog', []) match = 'neutron' neutron_endpoint = None for entry in catalog: if entry['name'] == match or 'Networking' in entry['name']: valid_endpoints = {} for ep in entry['endpoints']: interface = ep.get('interface', '') if interface in ['public', 'internal']: valid_endpoints[interface] = ep['url'] if valid_endpoints: # Favor public endpoints over internal neutron_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal")) break else: raise MissingNeutronEndpoint() return neutron_endpoint
python
def get_neutron_endpoint(cls, json_resp): """ Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service Sends a CRITICAL service check when none are found registered in the Catalog """ catalog = json_resp.get('token', {}).get('catalog', []) match = 'neutron' neutron_endpoint = None for entry in catalog: if entry['name'] == match or 'Networking' in entry['name']: valid_endpoints = {} for ep in entry['endpoints']: interface = ep.get('interface', '') if interface in ['public', 'internal']: valid_endpoints[interface] = ep['url'] if valid_endpoints: # Favor public endpoints over internal neutron_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal")) break else: raise MissingNeutronEndpoint() return neutron_endpoint
['def', 'get_neutron_endpoint', '(', 'cls', ',', 'json_resp', ')', ':', 'catalog', '=', 'json_resp', '.', 'get', '(', "'token'", ',', '{', '}', ')', '.', 'get', '(', "'catalog'", ',', '[', ']', ')', 'match', '=', "'neutron'", 'neutron_endpoint', '=', 'None', 'for', 'entry', 'in', 'catalog', ':', 'if', 'entry', '[', "'name'", ']', '==', 'match', 'or', "'Networking'", 'in', 'entry', '[', "'name'", ']', ':', 'valid_endpoints', '=', '{', '}', 'for', 'ep', 'in', 'entry', '[', "'endpoints'", ']', ':', 'interface', '=', 'ep', '.', 'get', '(', "'interface'", ',', "''", ')', 'if', 'interface', 'in', '[', "'public'", ',', "'internal'", ']', ':', 'valid_endpoints', '[', 'interface', ']', '=', 'ep', '[', "'url'", ']', 'if', 'valid_endpoints', ':', '# Favor public endpoints over internal', 'neutron_endpoint', '=', 'valid_endpoints', '.', 'get', '(', '"public"', ',', 'valid_endpoints', '.', 'get', '(', '"internal"', ')', ')', 'break', 'else', ':', 'raise', 'MissingNeutronEndpoint', '(', ')', 'return', 'neutron_endpoint']
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service Sends a CRITICAL service check when none are found registered in the Catalog
['Parse', 'the', 'service', 'catalog', 'returned', 'by', 'the', 'Identity', 'API', 'for', 'an', 'endpoint', 'matching', 'the', 'Neutron', 'service', 'Sends', 'a', 'CRITICAL', 'service', 'check', 'when', 'none', 'are', 'found', 'registered', 'in', 'the', 'Catalog']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack/datadog_checks/openstack/openstack.py#L455-L479
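get_neutron_endpoint walks a Keystone-style service catalog and prefers the public interface over the internal one. Below is a self-contained sketch of that selection logic; the catalog contents and URLs are invented, and a plain LookupError stands in for the check's MissingNeutronEndpoint exception.

catalog = [
    {"name": "neutron",
     "endpoints": [
         {"interface": "internal", "url": "http://10.0.0.5:9696"},
         {"interface": "public", "url": "https://network.example.com:9696"},
     ]},
]

def neutron_endpoint(catalog):
    for entry in catalog:
        if entry["name"] == "neutron" or "Networking" in entry["name"]:
            # keep only public/internal endpoints, then favour public
            urls = {ep["interface"]: ep["url"]
                    for ep in entry["endpoints"]
                    if ep.get("interface") in ("public", "internal")}
            if urls:
                return urls.get("public", urls.get("internal"))
    raise LookupError("no Neutron endpoint registered in the catalog")

print(neutron_endpoint(catalog))   # https://network.example.com:9696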
35
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
Flow.validate_json_schema
def validate_json_schema(self): """Validate the JSON schema. Return list of errors.""" errors = [] for work in self: for task in work: if not task.get_results().validate_json_schema(): errors.append(task) if not work.get_results().validate_json_schema(): errors.append(work) if not self.get_results().validate_json_schema(): errors.append(self) return errors
python
def validate_json_schema(self): """Validate the JSON schema. Return list of errors.""" errors = [] for work in self: for task in work: if not task.get_results().validate_json_schema(): errors.append(task) if not work.get_results().validate_json_schema(): errors.append(work) if not self.get_results().validate_json_schema(): errors.append(self) return errors
['def', 'validate_json_schema', '(', 'self', ')', ':', 'errors', '=', '[', ']', 'for', 'work', 'in', 'self', ':', 'for', 'task', 'in', 'work', ':', 'if', 'not', 'task', '.', 'get_results', '(', ')', '.', 'validate_json_schema', '(', ')', ':', 'errors', '.', 'append', '(', 'task', ')', 'if', 'not', 'work', '.', 'get_results', '(', ')', '.', 'validate_json_schema', '(', ')', ':', 'errors', '.', 'append', '(', 'work', ')', 'if', 'not', 'self', '.', 'get_results', '(', ')', '.', 'validate_json_schema', '(', ')', ':', 'errors', '.', 'append', '(', 'self', ')', 'return', 'errors']
Validate the JSON schema. Return list of errors.
['Validate', 'the', 'JSON', 'schema', '.', 'Return', 'list', 'of', 'errors', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L426-L439
36
ga4gh/ga4gh-server
ga4gh/server/datamodel/datasets.py
Dataset.getRnaQuantificationSetByName
def getRnaQuantificationSetByName(self, name): """ Returns the RnaQuantification set with the specified name, or raises an exception otherwise. """ if name not in self._rnaQuantificationSetNameMap: raise exceptions.RnaQuantificationSetNameNotFoundException(name) return self._rnaQuantificationSetNameMap[name]
python
def getRnaQuantificationSetByName(self, name): """ Returns the RnaQuantification set with the specified name, or raises an exception otherwise. """ if name not in self._rnaQuantificationSetNameMap: raise exceptions.RnaQuantificationSetNameNotFoundException(name) return self._rnaQuantificationSetNameMap[name]
['def', 'getRnaQuantificationSetByName', '(', 'self', ',', 'name', ')', ':', 'if', 'name', 'not', 'in', 'self', '.', '_rnaQuantificationSetNameMap', ':', 'raise', 'exceptions', '.', 'RnaQuantificationSetNameNotFoundException', '(', 'name', ')', 'return', 'self', '.', '_rnaQuantificationSetNameMap', '[', 'name', ']']
Returns the RnaQuantification set with the specified name, or raises an exception otherwise.
['Returns', 'the', 'RnaQuantification', 'set', 'with', 'the', 'specified', 'name', 'or', 'raises', 'an', 'exception', 'otherwise', '.']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/datasets.py#L402-L409
37
dpnova/python-xprintidle
xprintidle.py
_create_modulename
def _create_modulename(cdef_sources, source, sys_version): """ This is the same as CFFI's create modulename except we don't include the CFFI version. """ key = '\x00'.join([sys_version[:3], source, cdef_sources]) key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') return '_xprintidle_cffi_{0}{1}'.format(k1, k2)
python
def _create_modulename(cdef_sources, source, sys_version): """ This is the same as CFFI's create modulename except we don't include the CFFI version. """ key = '\x00'.join([sys_version[:3], source, cdef_sources]) key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') return '_xprintidle_cffi_{0}{1}'.format(k1, k2)
['def', '_create_modulename', '(', 'cdef_sources', ',', 'source', ',', 'sys_version', ')', ':', 'key', '=', "'\\x00'", '.', 'join', '(', '[', 'sys_version', '[', ':', '3', ']', ',', 'source', ',', 'cdef_sources', ']', ')', 'key', '=', 'key', '.', 'encode', '(', "'utf-8'", ')', 'k1', '=', 'hex', '(', 'binascii', '.', 'crc32', '(', 'key', '[', '0', ':', ':', '2', ']', ')', '&', '0xffffffff', ')', 'k1', '=', 'k1', '.', 'lstrip', '(', "'0x'", ')', '.', 'rstrip', '(', "'L'", ')', 'k2', '=', 'hex', '(', 'binascii', '.', 'crc32', '(', 'key', '[', '1', ':', ':', '2', ']', ')', '&', '0xffffffff', ')', 'k2', '=', 'k2', '.', 'lstrip', '(', "'0'", ')', '.', 'rstrip', '(', "'L'", ')', 'return', "'_xprintidle_cffi_{0}{1}'", '.', 'format', '(', 'k1', ',', 'k2', ')']
This is the same as CFFI's create modulename except we don't include the CFFI version.
['This', 'is', 'the', 'same', 'as', 'CFFI', 's', 'create', 'modulename', 'except', 'we', 'don', 't', 'include', 'the', 'CFFI', 'version', '.']
train
https://github.com/dpnova/python-xprintidle/blob/cc8f3c13a5dd578073d20f3d42208fcb8e1983b8/xprintidle.py#L10-L21
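_create_modulename derives a cache-friendly module name from two CRC32 checksums of the interleaved key bytes. The sketch below keeps the record's logic but renames the prefix; note that str.lstrip('0x') treats its argument as a set of characters, so it strips every leading '0' and 'x', which is kept here to stay faithful to the original rather than as a cleanup.

import binascii
import sys

def cache_module_name(cdef_sources, source, sys_version=sys.version):
    key = '\x00'.join([sys_version[:3], source, cdef_sources]).encode('utf-8')
    # lstrip/rstrip take character sets, so '0x' removes all leading zeros and x's
    k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff).lstrip('0x').rstrip('L')
    k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff).lstrip('0').rstrip('L')
    return '_example_cffi_{0}{1}'.format(k1, k2)

print(cache_module_name("int foo(void);", "/* placeholder C source */"))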
38
inasafe/inasafe
safe/gui/tools/batch/batch_dialog.py
BatchDialog.populate_table
def populate_table(self, scenario_directory): """ Populate table with files from scenario_directory directory. :param scenario_directory: Path where .txt & .py reside. :type scenario_directory: QString """ parsed_files = [] unparsed_files = [] self.table.clearContents() # Block signal to allow update checking only when the table is ready self.table.blockSignals(True) # NOTE(gigih): need this line to remove existing rows self.table.setRowCount(0) if not os.path.exists(scenario_directory): # LOGGER.info('Scenario directory does not exist: %s' % path) return # only support .py and .txt files for current_path in os.listdir(scenario_directory): extension = os.path.splitext(current_path)[1] absolute_path = os.path.join(scenario_directory, current_path) if extension == '.py': append_row(self.table, current_path, absolute_path) elif extension == '.txt': # insert scenarios from file into table widget try: scenarios = read_scenarios(absolute_path) validate_scenario(scenarios, scenario_directory) for key, value in list(scenarios.items()): append_row(self.table, key, value) parsed_files.append(current_path) except Error: unparsed_files.append(current_path) # unblock signal self.table.blockSignals(False)
python
def populate_table(self, scenario_directory): """ Populate table with files from scenario_directory directory. :param scenario_directory: Path where .txt & .py reside. :type scenario_directory: QString """ parsed_files = [] unparsed_files = [] self.table.clearContents() # Block signal to allow update checking only when the table is ready self.table.blockSignals(True) # NOTE(gigih): need this line to remove existing rows self.table.setRowCount(0) if not os.path.exists(scenario_directory): # LOGGER.info('Scenario directory does not exist: %s' % path) return # only support .py and .txt files for current_path in os.listdir(scenario_directory): extension = os.path.splitext(current_path)[1] absolute_path = os.path.join(scenario_directory, current_path) if extension == '.py': append_row(self.table, current_path, absolute_path) elif extension == '.txt': # insert scenarios from file into table widget try: scenarios = read_scenarios(absolute_path) validate_scenario(scenarios, scenario_directory) for key, value in list(scenarios.items()): append_row(self.table, key, value) parsed_files.append(current_path) except Error: unparsed_files.append(current_path) # unblock signal self.table.blockSignals(False)
['def', 'populate_table', '(', 'self', ',', 'scenario_directory', ')', ':', 'parsed_files', '=', '[', ']', 'unparsed_files', '=', '[', ']', 'self', '.', 'table', '.', 'clearContents', '(', ')', '# Block signal to allow update checking only when the table is ready', 'self', '.', 'table', '.', 'blockSignals', '(', 'True', ')', '# NOTE(gigih): need this line to remove existing rows', 'self', '.', 'table', '.', 'setRowCount', '(', '0', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'scenario_directory', ')', ':', "# LOGGER.info('Scenario directory does not exist: %s' % path)", 'return', '# only support .py and .txt files', 'for', 'current_path', 'in', 'os', '.', 'listdir', '(', 'scenario_directory', ')', ':', 'extension', '=', 'os', '.', 'path', '.', 'splitext', '(', 'current_path', ')', '[', '1', ']', 'absolute_path', '=', 'os', '.', 'path', '.', 'join', '(', 'scenario_directory', ',', 'current_path', ')', 'if', 'extension', '==', "'.py'", ':', 'append_row', '(', 'self', '.', 'table', ',', 'current_path', ',', 'absolute_path', ')', 'elif', 'extension', '==', "'.txt'", ':', '# insert scenarios from file into table widget', 'try', ':', 'scenarios', '=', 'read_scenarios', '(', 'absolute_path', ')', 'validate_scenario', '(', 'scenarios', ',', 'scenario_directory', ')', 'for', 'key', ',', 'value', 'in', 'list', '(', 'scenarios', '.', 'items', '(', ')', ')', ':', 'append_row', '(', 'self', '.', 'table', ',', 'key', ',', 'value', ')', 'parsed_files', '.', 'append', '(', 'current_path', ')', 'except', 'Error', ':', 'unparsed_files', '.', 'append', '(', 'current_path', ')', '# unblock signal', 'self', '.', 'table', '.', 'blockSignals', '(', 'False', ')']
Populate table with files from scenario_directory directory. :param scenario_directory: Path where .txt & .py reside. :type scenario_directory: QString
['Populate', 'table', 'with', 'files', 'from', 'scenario_directory', 'directory', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L213-L251
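The populate_table method in the row above dispatches on file extension (.py vs .txt) before filling the Qt table. A minimal, self-contained sketch of just that filtering step is given below; it deliberately leaves out the QTableWidget handling, signal blocking, and InaSAFE scenario parsing, which depend on the original project's helpers.

import os

def list_scenario_files(scenario_directory):
    """Return (.py paths, .txt paths) found directly in scenario_directory."""
    py_files, txt_files = [], []
    if not os.path.exists(scenario_directory):
        return py_files, txt_files
    for current_path in os.listdir(scenario_directory):
        extension = os.path.splitext(current_path)[1]
        absolute_path = os.path.join(scenario_directory, current_path)
        if extension == '.py':
            py_files.append(absolute_path)
        elif extension == '.txt':
            txt_files.append(absolute_path)
    return py_files, txt_files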
39
sibirrer/lenstronomy
lenstronomy/Sampling/likelihood.py
LikelihoodModule.logL
def logL(self, args): """ routine to compute X2 given variable parameters for a MCMC/PSO chain """ #extract parameters kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.param.args2kwargs(args) #generate image and computes likelihood self._reset_point_source_cache(bool=True) logL = 0 if self._check_bounds is True: penalty, bound_hit = self.check_bounds(args, self._lower_limit, self._upper_limit) logL -= penalty if bound_hit: return logL, None if self._image_likelihood is True: logL += self.image_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) if self._time_delay_likelihood is True: logL += self.time_delay_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) if self._check_positive_flux is True: bool = self.param.check_positive_flux(kwargs_source, kwargs_lens_light, kwargs_ps) if bool is False: logL -= 10**10 if self._flux_ratio_likelihood is True: logL += self.flux_ratio_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) logL += self._position_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) logL += self._prior_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo) self._reset_point_source_cache(bool=False) return logL, None
python
def logL(self, args): """ routine to compute X2 given variable parameters for a MCMC/PSO chain """ #extract parameters kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.param.args2kwargs(args) #generate image and computes likelihood self._reset_point_source_cache(bool=True) logL = 0 if self._check_bounds is True: penalty, bound_hit = self.check_bounds(args, self._lower_limit, self._upper_limit) logL -= penalty if bound_hit: return logL, None if self._image_likelihood is True: logL += self.image_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) if self._time_delay_likelihood is True: logL += self.time_delay_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) if self._check_positive_flux is True: bool = self.param.check_positive_flux(kwargs_source, kwargs_lens_light, kwargs_ps) if bool is False: logL -= 10**10 if self._flux_ratio_likelihood is True: logL += self.flux_ratio_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) logL += self._position_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo) logL += self._prior_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo) self._reset_point_source_cache(bool=False) return logL, None
['def', 'logL', '(', 'self', ',', 'args', ')', ':', '#extract parameters', 'kwargs_lens', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ',', 'kwargs_cosmo', '=', 'self', '.', 'param', '.', 'args2kwargs', '(', 'args', ')', '#generate image and computes likelihood', 'self', '.', '_reset_point_source_cache', '(', 'bool', '=', 'True', ')', 'logL', '=', '0', 'if', 'self', '.', '_check_bounds', 'is', 'True', ':', 'penalty', ',', 'bound_hit', '=', 'self', '.', 'check_bounds', '(', 'args', ',', 'self', '.', '_lower_limit', ',', 'self', '.', '_upper_limit', ')', 'logL', '-=', 'penalty', 'if', 'bound_hit', ':', 'return', 'logL', ',', 'None', 'if', 'self', '.', '_image_likelihood', 'is', 'True', ':', 'logL', '+=', 'self', '.', 'image_likelihood', '.', 'logL', '(', 'kwargs_lens', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ')', 'if', 'self', '.', '_time_delay_likelihood', 'is', 'True', ':', 'logL', '+=', 'self', '.', 'time_delay_likelihood', '.', 'logL', '(', 'kwargs_lens', ',', 'kwargs_ps', ',', 'kwargs_cosmo', ')', 'if', 'self', '.', '_check_positive_flux', 'is', 'True', ':', 'bool', '=', 'self', '.', 'param', '.', 'check_positive_flux', '(', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ')', 'if', 'bool', 'is', 'False', ':', 'logL', '-=', '10', '**', '10', 'if', 'self', '.', '_flux_ratio_likelihood', 'is', 'True', ':', 'logL', '+=', 'self', '.', 'flux_ratio_likelihood', '.', 'logL', '(', 'kwargs_lens', ',', 'kwargs_ps', ',', 'kwargs_cosmo', ')', 'logL', '+=', 'self', '.', '_position_likelihood', '.', 'logL', '(', 'kwargs_lens', ',', 'kwargs_ps', ',', 'kwargs_cosmo', ')', 'logL', '+=', 'self', '.', '_prior_likelihood', '.', 'logL', '(', 'kwargs_lens', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ',', 'kwargs_cosmo', ')', 'self', '.', '_reset_point_source_cache', '(', 'bool', '=', 'False', ')', 'return', 'logL', ',', 'None']
routine to compute X2 given variable parameters for a MCMC/PSO chain
['routine', 'to', 'compute', 'X2', 'given', 'variable', 'parameters', 'for', 'a', 'MCMC', '/', 'PSO', 'chain']
train
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Sampling/likelihood.py#L96-L123
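The logL routine above sums independent log-likelihood terms and short-circuits with a large penalty when a parameter bound is hit. The toy sketch below reproduces only that accumulation pattern; the bound check and the term functions are illustrative stand-ins, not lenstronomy's API.

def log_likelihood(args, lower, upper, terms):
    """Sum independent log-likelihood terms, penalising out-of-bounds args."""
    # quadratic-free toy penalty: distance outside the box, scaled up hard
    penalty = sum(max(lo - a, 0.0) + max(a - hi, 0.0)
                  for a, lo, hi in zip(args, lower, upper)) * 1e10
    if penalty > 0:
        return -penalty          # bound hit: skip the expensive terms
    logl = 0.0
    for term in terms:           # e.g. imaging, time delays, flux ratios
        logl += term(args)
    return logl

print(log_likelihood([0.5], [0.0], [1.0], [lambda a: -0.5 * a[0] ** 2]))  # -0.125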
40
wandb/client
wandb/__init__.py
log
def log(row=None, commit=True, *args, **kargs): """Log a dict to the global run's history. If commit is false, enables multiple calls before commiting. Eg. wandb.log({'train-loss': 0.5, 'accuracy': 0.9}) """ if run is None: raise ValueError( "You must call `wandb.init` in the same process before calling log") if row is None: row = {} if commit: run.history.add(row, *args, **kargs) else: run.history.update(row, *args, **kargs)
python
def log(row=None, commit=True, *args, **kargs): """Log a dict to the global run's history. If commit is false, enables multiple calls before commiting. Eg. wandb.log({'train-loss': 0.5, 'accuracy': 0.9}) """ if run is None: raise ValueError( "You must call `wandb.init` in the same process before calling log") if row is None: row = {} if commit: run.history.add(row, *args, **kargs) else: run.history.update(row, *args, **kargs)
['def', 'log', '(', 'row', '=', 'None', ',', 'commit', '=', 'True', ',', '*', 'args', ',', '*', '*', 'kargs', ')', ':', 'if', 'run', 'is', 'None', ':', 'raise', 'ValueError', '(', '"You must call `wandb.init` in the same process before calling log"', ')', 'if', 'row', 'is', 'None', ':', 'row', '=', '{', '}', 'if', 'commit', ':', 'run', '.', 'history', '.', 'add', '(', 'row', ',', '*', 'args', ',', '*', '*', 'kargs', ')', 'else', ':', 'run', '.', 'history', '.', 'update', '(', 'row', ',', '*', 'args', ',', '*', '*', 'kargs', ')']
Log a dict to the global run's history. If commit is false, enables multiple calls before commiting. Eg. wandb.log({'train-loss': 0.5, 'accuracy': 0.9})
['Log', 'a', 'dict', 'to', 'the', 'global', 'run', 's', 'history', '.', 'If', 'commit', 'is', 'false', 'enables', 'multiple', 'calls', 'before', 'commiting', '.']
train
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/__init__.py#L465-L481
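A short usage sketch of the logging call documented above: it assumes wandb.init has been called (as the ValueError in the row requires) and a reachable W&B backend; the project name is a placeholder.

import wandb

wandb.init(project="my-project")          # must run before wandb.log
for step in range(3):
    # accumulate metrics without committing a history row yet
    wandb.log({"train-loss": 1.0 / (step + 1)}, commit=False)
    # commit the row, adding one more metric
    wandb.log({"accuracy": 0.5 + 0.1 * step})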
41
aras7/deployr-python-client
deployr_connection.py
DeployRConnection.login
def login(self, username, password, disableautosave=True, print_response=True): """ :param username: :param password: :param disableautosave: boolean :param print_response: print log if required :return: status code, response data """ if type(username) != str: return False, "Username must be string" if type(password) != str: return False, "Password must be string" if type(disableautosave) != bool: return False, "Disableautosave must be boolean" data = {"username": username, "password": password, "disableautosave": disableautosave} status_response, response = self.call_api("r/user/login/", data, print_response=print_response) # Store httpcookie if possible if status_response and "deployr" in response: if "response" in response["deployr"]: if "httpcookie" in response["deployr"]["response"]: self.JSESSIONID = response["deployr"]["response"]["httpcookie"] return status_response, response
python
def login(self, username, password, disableautosave=True, print_response=True): """ :param username: :param password: :param disableautosave: boolean :param print_response: print log if required :return: status code, response data """ if type(username) != str: return False, "Username must be string" if type(password) != str: return False, "Password must be string" if type(disableautosave) != bool: return False, "Disableautosave must be boolean" data = {"username": username, "password": password, "disableautosave": disableautosave} status_response, response = self.call_api("r/user/login/", data, print_response=print_response) # Store httpcookie if possible if status_response and "deployr" in response: if "response" in response["deployr"]: if "httpcookie" in response["deployr"]["response"]: self.JSESSIONID = response["deployr"]["response"]["httpcookie"] return status_response, response
['def', 'login', '(', 'self', ',', 'username', ',', 'password', ',', 'disableautosave', '=', 'True', ',', 'print_response', '=', 'True', ')', ':', 'if', 'type', '(', 'username', ')', '!=', 'str', ':', 'return', 'False', ',', '"Username must be string"', 'if', 'type', '(', 'password', ')', '!=', 'str', ':', 'return', 'False', ',', '"Password must be string"', 'if', 'type', '(', 'disableautosave', ')', '!=', 'bool', ':', 'return', 'False', ',', '"Disableautosave must be boolean"', 'data', '=', '{', '"username"', ':', 'username', ',', '"password"', ':', 'password', ',', '"disableautosave"', ':', 'disableautosave', '}', 'status_response', ',', 'response', '=', 'self', '.', 'call_api', '(', '"r/user/login/"', ',', 'data', ',', 'print_response', '=', 'print_response', ')', '# Store httpcookie if possible', 'if', 'status_response', 'and', '"deployr"', 'in', 'response', ':', 'if', '"response"', 'in', 'response', '[', '"deployr"', ']', ':', 'if', '"httpcookie"', 'in', 'response', '[', '"deployr"', ']', '[', '"response"', ']', ':', 'self', '.', 'JSESSIONID', '=', 'response', '[', '"deployr"', ']', '[', '"response"', ']', '[', '"httpcookie"', ']', 'return', 'status_response', ',', 'response']
:param username: :param password: :param disableautosave: boolean :param print_response: print log if required :return: status code, response data
[':', 'param', 'username', ':', ':', 'param', 'password', ':', ':', 'param', 'disableautosave', ':', 'boolean', ':', 'param', 'print_response', ':', 'print', 'log', 'if', 'required', ':', 'return', ':', 'status', 'code', 'response', 'data']
train
https://github.com/aras7/deployr-python-client/blob/3ca517ff38e9a7dd1e21fcc88d54537546b9e7e5/deployr_connection.py#L28-L55
42
saltstack/salt
salt/states/zone.py
detached
def detached(name): ''' Ensure zone is detached name : string name of the zone ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} zones = __salt__['zoneadm.list'](installed=True, configured=True) if name in zones: if zones[name]['state'] != 'configured': if __opts__['test']: res_detach = {'status': True} else: res_detach = __salt__['zoneadm.detach'](name) ret['result'] = res_detach['status'] if ret['result']: ret['changes'][name] = 'detached' ret['comment'] = 'The zone {0} was detached.'.format(name) else: ret['comment'] = [] ret['comment'].append('Failed to detach zone {0}!'.format(name)) if 'message' in res_detach: ret['comment'].append(res_detach['message']) ret['comment'] = "\n".join(ret['comment']) else: ret['result'] = True ret['comment'] = 'zone {0} already detached.'.format(name) else: ## note: a non existing zone is not attached, we do not consider this a failure ret['result'] = True ret['comment'] = 'zone {0} is not configured!'.format(name) return ret
python
def detached(name): ''' Ensure zone is detached name : string name of the zone ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} zones = __salt__['zoneadm.list'](installed=True, configured=True) if name in zones: if zones[name]['state'] != 'configured': if __opts__['test']: res_detach = {'status': True} else: res_detach = __salt__['zoneadm.detach'](name) ret['result'] = res_detach['status'] if ret['result']: ret['changes'][name] = 'detached' ret['comment'] = 'The zone {0} was detached.'.format(name) else: ret['comment'] = [] ret['comment'].append('Failed to detach zone {0}!'.format(name)) if 'message' in res_detach: ret['comment'].append(res_detach['message']) ret['comment'] = "\n".join(ret['comment']) else: ret['result'] = True ret['comment'] = 'zone {0} already detached.'.format(name) else: ## note: a non existing zone is not attached, we do not consider this a failure ret['result'] = True ret['comment'] = 'zone {0} is not configured!'.format(name) return ret
['def', 'detached', '(', 'name', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", '}', 'zones', '=', '__salt__', '[', "'zoneadm.list'", ']', '(', 'installed', '=', 'True', ',', 'configured', '=', 'True', ')', 'if', 'name', 'in', 'zones', ':', 'if', 'zones', '[', 'name', ']', '[', "'state'", ']', '!=', "'configured'", ':', 'if', '__opts__', '[', "'test'", ']', ':', 'res_detach', '=', '{', "'status'", ':', 'True', '}', 'else', ':', 'res_detach', '=', '__salt__', '[', "'zoneadm.detach'", ']', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'res_detach', '[', "'status'", ']', 'if', 'ret', '[', "'result'", ']', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', "'detached'", 'ret', '[', "'comment'", ']', '=', "'The zone {0} was detached.'", '.', 'format', '(', 'name', ')', 'else', ':', 'ret', '[', "'comment'", ']', '=', '[', ']', 'ret', '[', "'comment'", ']', '.', 'append', '(', "'Failed to detach zone {0}!'", '.', 'format', '(', 'name', ')', ')', 'if', "'message'", 'in', 'res_detach', ':', 'ret', '[', "'comment'", ']', '.', 'append', '(', 'res_detach', '[', "'message'", ']', ')', 'ret', '[', "'comment'", ']', '=', '"\\n"', '.', 'join', '(', 'ret', '[', "'comment'", ']', ')', 'else', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'zone {0} already detached.'", '.', 'format', '(', 'name', ')', 'else', ':', '## note: a non existing zone is not attached, we do not consider this a failure', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'zone {0} is not configured!'", '.', 'format', '(', 'name', ')', 'return', 'ret']
Ensure zone is detached name : string name of the zone
['Ensure', 'zone', 'is', 'detached']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zone.py#L1100-L1138
43
onelogin/python3-saml
src/onelogin/saml2/response.py
OneLogin_Saml2_Response.__query
def __query(self, query, tagid=None): """ Extracts nodes that match the query from the Response :param query: Xpath Expresion :type query: String :param tagid: Tag ID :type query: String :returns: The queried nodes :rtype: list """ if self.encrypted: document = self.decrypted_document else: document = self.document return OneLogin_Saml2_XML.query(document, query, None, tagid)
python
def __query(self, query, tagid=None): """ Extracts nodes that match the query from the Response :param query: Xpath Expresion :type query: String :param tagid: Tag ID :type query: String :returns: The queried nodes :rtype: list """ if self.encrypted: document = self.decrypted_document else: document = self.document return OneLogin_Saml2_XML.query(document, query, None, tagid)
['def', '__query', '(', 'self', ',', 'query', ',', 'tagid', '=', 'None', ')', ':', 'if', 'self', '.', 'encrypted', ':', 'document', '=', 'self', '.', 'decrypted_document', 'else', ':', 'document', '=', 'self', '.', 'document', 'return', 'OneLogin_Saml2_XML', '.', 'query', '(', 'document', ',', 'query', ',', 'None', ',', 'tagid', ')']
Extracts nodes that match the query from the Response :param query: Xpath Expresion :type query: String :param tagid: Tag ID :type query: String :returns: The queried nodes :rtype: list
['Extracts', 'nodes', 'that', 'match', 'the', 'query', 'from', 'the', 'Response']
train
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L760-L777
44
ray-project/ray
python/ray/rllib/env/atari_wrappers.py
wrap_deepmind
def wrap_deepmind(env, dim=84, framestack=True): """Configure environment for DeepMind-style Atari. Note that we assume reward clipping is done outside the wrapper. Args: dim (int): Dimension to resize observations to (dim x dim). framestack (bool): Whether to framestack observations. """ env = MonitorEnv(env) env = NoopResetEnv(env, noop_max=30) if "NoFrameskip" in env.spec.id: env = MaxAndSkipEnv(env, skip=4) env = EpisodicLifeEnv(env) if "FIRE" in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env, dim) # env = ScaledFloatFrame(env) # TODO: use for dqn? # env = ClipRewardEnv(env) # reward clipping is handled by policy eval if framestack: env = FrameStack(env, 4) return env
python
def wrap_deepmind(env, dim=84, framestack=True): """Configure environment for DeepMind-style Atari. Note that we assume reward clipping is done outside the wrapper. Args: dim (int): Dimension to resize observations to (dim x dim). framestack (bool): Whether to framestack observations. """ env = MonitorEnv(env) env = NoopResetEnv(env, noop_max=30) if "NoFrameskip" in env.spec.id: env = MaxAndSkipEnv(env, skip=4) env = EpisodicLifeEnv(env) if "FIRE" in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env, dim) # env = ScaledFloatFrame(env) # TODO: use for dqn? # env = ClipRewardEnv(env) # reward clipping is handled by policy eval if framestack: env = FrameStack(env, 4) return env
['def', 'wrap_deepmind', '(', 'env', ',', 'dim', '=', '84', ',', 'framestack', '=', 'True', ')', ':', 'env', '=', 'MonitorEnv', '(', 'env', ')', 'env', '=', 'NoopResetEnv', '(', 'env', ',', 'noop_max', '=', '30', ')', 'if', '"NoFrameskip"', 'in', 'env', '.', 'spec', '.', 'id', ':', 'env', '=', 'MaxAndSkipEnv', '(', 'env', ',', 'skip', '=', '4', ')', 'env', '=', 'EpisodicLifeEnv', '(', 'env', ')', 'if', '"FIRE"', 'in', 'env', '.', 'unwrapped', '.', 'get_action_meanings', '(', ')', ':', 'env', '=', 'FireResetEnv', '(', 'env', ')', 'env', '=', 'WarpFrame', '(', 'env', ',', 'dim', ')', '# env = ScaledFloatFrame(env) # TODO: use for dqn?', '# env = ClipRewardEnv(env) # reward clipping is handled by policy eval', 'if', 'framestack', ':', 'env', '=', 'FrameStack', '(', 'env', ',', '4', ')', 'return', 'env']
Configure environment for DeepMind-style Atari. Note that we assume reward clipping is done outside the wrapper. Args: dim (int): Dimension to resize observations to (dim x dim). framestack (bool): Whether to framestack observations.
['Configure', 'environment', 'for', 'DeepMind', '-', 'style', 'Atari', '.']
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/env/atari_wrappers.py#L270-L291
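A hedged usage sketch for the wrapper chain above: it assumes gym with the Atari extras installed and that the module is importable from this RLlib snapshot under the path shown in the row; the environment id is just an example.

import gym
from ray.rllib.env.atari_wrappers import wrap_deepmind  # module path as in this repo snapshot

env = gym.make("PongNoFrameskip-v4")
env = wrap_deepmind(env, dim=84, framestack=True)
print(env.observation_space)  # expected to report an 84x84x4 observation space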
45
monarch-initiative/dipper
dipper/sources/Reactome.py
Reactome._parse_reactome_association_file
def _parse_reactome_association_file( self, file, limit=None, subject_prefix=None, object_prefix=None): """ Parse ensembl gene to reactome pathway file :param file: file path (not handle) :param limit: limit (int, optional) limit the number of rows processed :return: None """ eco_map = Reactome.get_eco_map(Reactome.map_files['eco_map']) count = 0 with open(file, 'r') as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: (component, pathway_id, pathway_iri, pathway_label, go_ecode, species_name) = row count += 1 self._add_component_pathway_association( eco_map, component, subject_prefix, pathway_id, object_prefix, pathway_label, go_ecode) if limit is not None and count >= limit: break return
python
def _parse_reactome_association_file( self, file, limit=None, subject_prefix=None, object_prefix=None): """ Parse ensembl gene to reactome pathway file :param file: file path (not handle) :param limit: limit (int, optional) limit the number of rows processed :return: None """ eco_map = Reactome.get_eco_map(Reactome.map_files['eco_map']) count = 0 with open(file, 'r') as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: (component, pathway_id, pathway_iri, pathway_label, go_ecode, species_name) = row count += 1 self._add_component_pathway_association( eco_map, component, subject_prefix, pathway_id, object_prefix, pathway_label, go_ecode) if limit is not None and count >= limit: break return
['def', '_parse_reactome_association_file', '(', 'self', ',', 'file', ',', 'limit', '=', 'None', ',', 'subject_prefix', '=', 'None', ',', 'object_prefix', '=', 'None', ')', ':', 'eco_map', '=', 'Reactome', '.', 'get_eco_map', '(', 'Reactome', '.', 'map_files', '[', "'eco_map'", ']', ')', 'count', '=', '0', 'with', 'open', '(', 'file', ',', "'r'", ')', 'as', 'tsvfile', ':', 'reader', '=', 'csv', '.', 'reader', '(', 'tsvfile', ',', 'delimiter', '=', '"\\t"', ')', 'for', 'row', 'in', 'reader', ':', '(', 'component', ',', 'pathway_id', ',', 'pathway_iri', ',', 'pathway_label', ',', 'go_ecode', ',', 'species_name', ')', '=', 'row', 'count', '+=', '1', 'self', '.', '_add_component_pathway_association', '(', 'eco_map', ',', 'component', ',', 'subject_prefix', ',', 'pathway_id', ',', 'object_prefix', ',', 'pathway_label', ',', 'go_ecode', ')', 'if', 'limit', 'is', 'not', 'None', 'and', 'count', '>=', 'limit', ':', 'break', 'return']
Parse ensembl gene to reactome pathway file :param file: file path (not handle) :param limit: limit (int, optional) limit the number of rows processed :return: None
['Parse', 'ensembl', 'gene', 'to', 'reactome', 'pathway', 'file', ':', 'param', 'file', ':', 'file', 'path', '(', 'not', 'handle', ')', ':', 'param', 'limit', ':', 'limit', '(', 'int', 'optional', ')', 'limit', 'the', 'number', 'of', 'rows', 'processed', ':', 'return', ':', 'None']
train
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Reactome.py#L75-L98
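The parser above reads a tab-separated Ensembl-to-Reactome file, unpacks six columns per row, and stops at an optional limit. The self-contained sketch below shows only that reading pattern, using an in-memory file with made-up values instead of Dipper's association builder.

import csv
import io

sample = ("ENSG000001\tR-HSA-1\thttps://reactome.org/R-HSA-1\t"
          "Pathway one\tTAS\tHomo sapiens\n") * 5
limit = 3
count = 0
with io.StringIO(sample) as tsvfile:
    reader = csv.reader(tsvfile, delimiter="\t")
    for row in reader:
        (component, pathway_id, pathway_iri,
         pathway_label, go_ecode, species_name) = row
        count += 1
        print(component, pathway_id, go_ecode)
        if limit is not None and count >= limit:
            break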
46
jtwhite79/pyemu
pyemu/utils/helpers.py
PstFromFlopyModel.build_prior
def build_prior(self, fmt="ascii",filename=None,droptol=None, chunk=None, sparse=False, sigma_range=6): """ build a prior parameter covariance matrix. Parameters ---------- fmt : str the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo". default is "ascii" filename : str the filename to save the prior cov matrix to. If None, the name is formed using model nam_file name. Default is None. droptol : float tolerance for dropping near-zero values when writing compressed binary. Default is None chunk : int chunk size to write in a single pass - for binary only sparse : bool flag to build a pyemu.SparseMatrix format cov matrix. Default is False sigma_range : float number of standard deviations represented by the parameter bounds. Default is 6. Returns ------- cov : pyemu.Cov a full covariance matrix """ fmt = fmt.lower() acc_fmts = ["ascii","binary","uncfile","none","coo"] if fmt not in acc_fmts: self.logger.lraise("unrecognized prior save 'fmt':{0}, options are: {1}". format(fmt,','.join(acc_fmts))) self.log("building prior covariance matrix") struct_dict = {} if self.pp_suffix in self.par_dfs.keys(): pp_df = self.par_dfs[self.pp_suffix] pp_dfs = [] for pargp in pp_df.pargp.unique(): gp_df = pp_df.loc[pp_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") pp_dfs.append(p_df) #pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()] struct_dict[self.pp_geostruct] = pp_dfs if self.gr_suffix in self.par_dfs.keys(): gr_df = self.par_dfs[self.gr_suffix] gr_dfs = [] for pargp in gr_df.pargp.unique(): gp_df = gr_df.loc[gr_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") gr_dfs.append(p_df) #gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()] struct_dict[self.grid_geostruct] = gr_dfs if "temporal_list" in self.par_dfs.keys(): bc_df = self.par_dfs["temporal_list"] bc_df.loc[:,"y"] = 0 bc_df.loc[:,"x"] = bc_df.timedelta.apply(lambda x: x.days) bc_dfs = [] for pargp in bc_df.pargp.unique(): gp_df = bc_df.loc[bc_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") #print(p_df) bc_dfs.append(p_df) #bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()] struct_dict[self.temporal_list_geostruct] = bc_dfs if "spatial_list" in self.par_dfs.keys(): bc_df = self.par_dfs["spatial_list"] bc_dfs = [] for pargp in bc_df.pargp.unique(): gp_df = bc_df.loc[bc_df.pargp==pargp,:] #p_df = gp_df.drop_duplicates(subset="parnme") #print(p_df) bc_dfs.append(gp_df) struct_dict[self.spatial_list_geostruct] = bc_dfs if "hfb" in self.par_dfs.keys(): if self.spatial_list_geostruct in struct_dict.keys(): struct_dict[self.spatial_list_geostruct].append(self.par_dfs["hfb"]) else: struct_dict[self.spatial_list_geostruct] = [self.par_dfs["hfb"]] if "sfr" in self.par_dfs.keys(): self.logger.warn("geospatial prior not implemented for SFR pars") if len(struct_dict) > 0: if sparse: cov = pyemu.helpers.sparse_geostatistical_prior_builder(self.pst, struct_dict=struct_dict, sigma_range=sigma_range) else: cov = pyemu.helpers.geostatistical_prior_builder(self.pst, struct_dict=struct_dict, sigma_range=sigma_range) else: cov = pyemu.Cov.from_parameter_data(self.pst,sigma_range=sigma_range) if filename is None: filename = os.path.join(self.m.model_ws,self.pst_name+".prior.cov") if fmt != "none": self.logger.statement("saving prior covariance matrix to file {0}".format(filename)) if fmt == 'ascii': cov.to_ascii(filename) elif fmt == 'binary': cov.to_binary(filename,droptol=droptol,chunk=chunk) 
elif fmt == 'uncfile': cov.to_uncfile(filename) elif fmt == 'coo': cov.to_coo(filename,droptol=droptol,chunk=chunk) self.log("building prior covariance matrix") return cov
python
def build_prior(self, fmt="ascii",filename=None,droptol=None, chunk=None, sparse=False, sigma_range=6): """ build a prior parameter covariance matrix. Parameters ---------- fmt : str the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo". default is "ascii" filename : str the filename to save the prior cov matrix to. If None, the name is formed using model nam_file name. Default is None. droptol : float tolerance for dropping near-zero values when writing compressed binary. Default is None chunk : int chunk size to write in a single pass - for binary only sparse : bool flag to build a pyemu.SparseMatrix format cov matrix. Default is False sigma_range : float number of standard deviations represented by the parameter bounds. Default is 6. Returns ------- cov : pyemu.Cov a full covariance matrix """ fmt = fmt.lower() acc_fmts = ["ascii","binary","uncfile","none","coo"] if fmt not in acc_fmts: self.logger.lraise("unrecognized prior save 'fmt':{0}, options are: {1}". format(fmt,','.join(acc_fmts))) self.log("building prior covariance matrix") struct_dict = {} if self.pp_suffix in self.par_dfs.keys(): pp_df = self.par_dfs[self.pp_suffix] pp_dfs = [] for pargp in pp_df.pargp.unique(): gp_df = pp_df.loc[pp_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") pp_dfs.append(p_df) #pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()] struct_dict[self.pp_geostruct] = pp_dfs if self.gr_suffix in self.par_dfs.keys(): gr_df = self.par_dfs[self.gr_suffix] gr_dfs = [] for pargp in gr_df.pargp.unique(): gp_df = gr_df.loc[gr_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") gr_dfs.append(p_df) #gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()] struct_dict[self.grid_geostruct] = gr_dfs if "temporal_list" in self.par_dfs.keys(): bc_df = self.par_dfs["temporal_list"] bc_df.loc[:,"y"] = 0 bc_df.loc[:,"x"] = bc_df.timedelta.apply(lambda x: x.days) bc_dfs = [] for pargp in bc_df.pargp.unique(): gp_df = bc_df.loc[bc_df.pargp==pargp,:] p_df = gp_df.drop_duplicates(subset="parnme") #print(p_df) bc_dfs.append(p_df) #bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()] struct_dict[self.temporal_list_geostruct] = bc_dfs if "spatial_list" in self.par_dfs.keys(): bc_df = self.par_dfs["spatial_list"] bc_dfs = [] for pargp in bc_df.pargp.unique(): gp_df = bc_df.loc[bc_df.pargp==pargp,:] #p_df = gp_df.drop_duplicates(subset="parnme") #print(p_df) bc_dfs.append(gp_df) struct_dict[self.spatial_list_geostruct] = bc_dfs if "hfb" in self.par_dfs.keys(): if self.spatial_list_geostruct in struct_dict.keys(): struct_dict[self.spatial_list_geostruct].append(self.par_dfs["hfb"]) else: struct_dict[self.spatial_list_geostruct] = [self.par_dfs["hfb"]] if "sfr" in self.par_dfs.keys(): self.logger.warn("geospatial prior not implemented for SFR pars") if len(struct_dict) > 0: if sparse: cov = pyemu.helpers.sparse_geostatistical_prior_builder(self.pst, struct_dict=struct_dict, sigma_range=sigma_range) else: cov = pyemu.helpers.geostatistical_prior_builder(self.pst, struct_dict=struct_dict, sigma_range=sigma_range) else: cov = pyemu.Cov.from_parameter_data(self.pst,sigma_range=sigma_range) if filename is None: filename = os.path.join(self.m.model_ws,self.pst_name+".prior.cov") if fmt != "none": self.logger.statement("saving prior covariance matrix to file {0}".format(filename)) if fmt == 'ascii': cov.to_ascii(filename) elif fmt == 'binary': cov.to_binary(filename,droptol=droptol,chunk=chunk) 
elif fmt == 'uncfile': cov.to_uncfile(filename) elif fmt == 'coo': cov.to_coo(filename,droptol=droptol,chunk=chunk) self.log("building prior covariance matrix") return cov
['def', 'build_prior', '(', 'self', ',', 'fmt', '=', '"ascii"', ',', 'filename', '=', 'None', ',', 'droptol', '=', 'None', ',', 'chunk', '=', 'None', ',', 'sparse', '=', 'False', ',', 'sigma_range', '=', '6', ')', ':', 'fmt', '=', 'fmt', '.', 'lower', '(', ')', 'acc_fmts', '=', '[', '"ascii"', ',', '"binary"', ',', '"uncfile"', ',', '"none"', ',', '"coo"', ']', 'if', 'fmt', 'not', 'in', 'acc_fmts', ':', 'self', '.', 'logger', '.', 'lraise', '(', '"unrecognized prior save \'fmt\':{0}, options are: {1}"', '.', 'format', '(', 'fmt', ',', "','", '.', 'join', '(', 'acc_fmts', ')', ')', ')', 'self', '.', 'log', '(', '"building prior covariance matrix"', ')', 'struct_dict', '=', '{', '}', 'if', 'self', '.', 'pp_suffix', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'pp_df', '=', 'self', '.', 'par_dfs', '[', 'self', '.', 'pp_suffix', ']', 'pp_dfs', '=', '[', ']', 'for', 'pargp', 'in', 'pp_df', '.', 'pargp', '.', 'unique', '(', ')', ':', 'gp_df', '=', 'pp_df', '.', 'loc', '[', 'pp_df', '.', 'pargp', '==', 'pargp', ',', ':', ']', 'p_df', '=', 'gp_df', '.', 'drop_duplicates', '(', 'subset', '=', '"parnme"', ')', 'pp_dfs', '.', 'append', '(', 'p_df', ')', '#pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]', 'struct_dict', '[', 'self', '.', 'pp_geostruct', ']', '=', 'pp_dfs', 'if', 'self', '.', 'gr_suffix', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'gr_df', '=', 'self', '.', 'par_dfs', '[', 'self', '.', 'gr_suffix', ']', 'gr_dfs', '=', '[', ']', 'for', 'pargp', 'in', 'gr_df', '.', 'pargp', '.', 'unique', '(', ')', ':', 'gp_df', '=', 'gr_df', '.', 'loc', '[', 'gr_df', '.', 'pargp', '==', 'pargp', ',', ':', ']', 'p_df', '=', 'gp_df', '.', 'drop_duplicates', '(', 'subset', '=', '"parnme"', ')', 'gr_dfs', '.', 'append', '(', 'p_df', ')', '#gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]', 'struct_dict', '[', 'self', '.', 'grid_geostruct', ']', '=', 'gr_dfs', 'if', '"temporal_list"', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'bc_df', '=', 'self', '.', 'par_dfs', '[', '"temporal_list"', ']', 'bc_df', '.', 'loc', '[', ':', ',', '"y"', ']', '=', '0', 'bc_df', '.', 'loc', '[', ':', ',', '"x"', ']', '=', 'bc_df', '.', 'timedelta', '.', 'apply', '(', 'lambda', 'x', ':', 'x', '.', 'days', ')', 'bc_dfs', '=', '[', ']', 'for', 'pargp', 'in', 'bc_df', '.', 'pargp', '.', 'unique', '(', ')', ':', 'gp_df', '=', 'bc_df', '.', 'loc', '[', 'bc_df', '.', 'pargp', '==', 'pargp', ',', ':', ']', 'p_df', '=', 'gp_df', '.', 'drop_duplicates', '(', 'subset', '=', '"parnme"', ')', '#print(p_df)', 'bc_dfs', '.', 'append', '(', 'p_df', ')', '#bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]', 'struct_dict', '[', 'self', '.', 'temporal_list_geostruct', ']', '=', 'bc_dfs', 'if', '"spatial_list"', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'bc_df', '=', 'self', '.', 'par_dfs', '[', '"spatial_list"', ']', 'bc_dfs', '=', '[', ']', 'for', 'pargp', 'in', 'bc_df', '.', 'pargp', '.', 'unique', '(', ')', ':', 'gp_df', '=', 'bc_df', '.', 'loc', '[', 'bc_df', '.', 'pargp', '==', 'pargp', ',', ':', ']', '#p_df = gp_df.drop_duplicates(subset="parnme")', '#print(p_df)', 'bc_dfs', '.', 'append', '(', 'gp_df', ')', 'struct_dict', '[', 'self', '.', 'spatial_list_geostruct', ']', '=', 'bc_dfs', 'if', '"hfb"', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'if', 'self', '.', 'spatial_list_geostruct', 'in', 'struct_dict', '.', 'keys', '(', ')', ':', 'struct_dict', '[', 'self', 
'.', 'spatial_list_geostruct', ']', '.', 'append', '(', 'self', '.', 'par_dfs', '[', '"hfb"', ']', ')', 'else', ':', 'struct_dict', '[', 'self', '.', 'spatial_list_geostruct', ']', '=', '[', 'self', '.', 'par_dfs', '[', '"hfb"', ']', ']', 'if', '"sfr"', 'in', 'self', '.', 'par_dfs', '.', 'keys', '(', ')', ':', 'self', '.', 'logger', '.', 'warn', '(', '"geospatial prior not implemented for SFR pars"', ')', 'if', 'len', '(', 'struct_dict', ')', '>', '0', ':', 'if', 'sparse', ':', 'cov', '=', 'pyemu', '.', 'helpers', '.', 'sparse_geostatistical_prior_builder', '(', 'self', '.', 'pst', ',', 'struct_dict', '=', 'struct_dict', ',', 'sigma_range', '=', 'sigma_range', ')', 'else', ':', 'cov', '=', 'pyemu', '.', 'helpers', '.', 'geostatistical_prior_builder', '(', 'self', '.', 'pst', ',', 'struct_dict', '=', 'struct_dict', ',', 'sigma_range', '=', 'sigma_range', ')', 'else', ':', 'cov', '=', 'pyemu', '.', 'Cov', '.', 'from_parameter_data', '(', 'self', '.', 'pst', ',', 'sigma_range', '=', 'sigma_range', ')', 'if', 'filename', 'is', 'None', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'm', '.', 'model_ws', ',', 'self', '.', 'pst_name', '+', '".prior.cov"', ')', 'if', 'fmt', '!=', '"none"', ':', 'self', '.', 'logger', '.', 'statement', '(', '"saving prior covariance matrix to file {0}"', '.', 'format', '(', 'filename', ')', ')', 'if', 'fmt', '==', "'ascii'", ':', 'cov', '.', 'to_ascii', '(', 'filename', ')', 'elif', 'fmt', '==', "'binary'", ':', 'cov', '.', 'to_binary', '(', 'filename', ',', 'droptol', '=', 'droptol', ',', 'chunk', '=', 'chunk', ')', 'elif', 'fmt', '==', "'uncfile'", ':', 'cov', '.', 'to_uncfile', '(', 'filename', ')', 'elif', 'fmt', '==', "'coo'", ':', 'cov', '.', 'to_coo', '(', 'filename', ',', 'droptol', '=', 'droptol', ',', 'chunk', '=', 'chunk', ')', 'self', '.', 'log', '(', '"building prior covariance matrix"', ')', 'return', 'cov']
build a prior parameter covariance matrix. Parameters ---------- fmt : str the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo". default is "ascii" filename : str the filename to save the prior cov matrix to. If None, the name is formed using model nam_file name. Default is None. droptol : float tolerance for dropping near-zero values when writing compressed binary. Default is None chunk : int chunk size to write in a single pass - for binary only sparse : bool flag to build a pyemu.SparseMatrix format cov matrix. Default is False sigma_range : float number of standard deviations represented by the parameter bounds. Default is 6. Returns ------- cov : pyemu.Cov a full covariance matrix
['build', 'a', 'prior', 'parameter', 'covariance', 'matrix', '.']
train
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L2632-L2744
47
wummel/linkchecker
linkcheck/checker/__init__.py
get_url_from
def get_url_from (base_url, recursion_level, aggregate, parent_url=None, base_ref=None, line=0, column=0, page=0, name=u"", parent_content_type=None, extern=None): """ Get url data from given base data. @param base_url: base url from a link tag @type base_url: string or None @param recursion_level: current recursion level @type recursion_level: number @param aggregate: aggregate object @type aggregate: aggregate.Consumer @param parent_url: parent url @type parent_url: string or None @param base_ref: base url from <base> tag @type base_ref string or None @param line: line number @type line: number @param column: column number @type column: number @param page: page number @type page: number @param name: link name @type name: string @param extern: (is_extern, is_strict) or None @type extern: tuple(int, int) or None """ if base_url is not None: base_url = strformat.unicode_safe(base_url) # left strip for detection of URL scheme base_url_stripped = base_url.lstrip() else: base_url_stripped = base_url if parent_url is not None: parent_url = strformat.unicode_safe(parent_url) if base_ref is not None: base_ref = strformat.unicode_safe(base_ref) name = strformat.unicode_safe(name) url = absolute_url(base_url_stripped, base_ref, parent_url).lower() if ":" in url: scheme = url.split(":", 1)[0].lower() else: scheme = None if not (url or name): # use filename as base url, with slash as path seperator name = base_url.replace("\\", "/") allowed_schemes = aggregate.config["allowedschemes"] # ignore local PHP files with execution directives local_php = (parent_content_type == 'application/x-httpd-php' and '<?' in base_url and '?>' in base_url and scheme == 'file') if local_php or (allowed_schemes and scheme not in allowed_schemes): klass = ignoreurl.IgnoreUrl else: assume_local_file = (recursion_level == 0) klass = get_urlclass_from(scheme, assume_local_file=assume_local_file) log.debug(LOG_CHECK, "%s handles url %s", klass.__name__, base_url) return klass(base_url, recursion_level, aggregate, parent_url=parent_url, base_ref=base_ref, line=line, column=column, page=page, name=name, extern=extern)
python
def get_url_from (base_url, recursion_level, aggregate, parent_url=None, base_ref=None, line=0, column=0, page=0, name=u"", parent_content_type=None, extern=None): """ Get url data from given base data. @param base_url: base url from a link tag @type base_url: string or None @param recursion_level: current recursion level @type recursion_level: number @param aggregate: aggregate object @type aggregate: aggregate.Consumer @param parent_url: parent url @type parent_url: string or None @param base_ref: base url from <base> tag @type base_ref string or None @param line: line number @type line: number @param column: column number @type column: number @param page: page number @type page: number @param name: link name @type name: string @param extern: (is_extern, is_strict) or None @type extern: tuple(int, int) or None """ if base_url is not None: base_url = strformat.unicode_safe(base_url) # left strip for detection of URL scheme base_url_stripped = base_url.lstrip() else: base_url_stripped = base_url if parent_url is not None: parent_url = strformat.unicode_safe(parent_url) if base_ref is not None: base_ref = strformat.unicode_safe(base_ref) name = strformat.unicode_safe(name) url = absolute_url(base_url_stripped, base_ref, parent_url).lower() if ":" in url: scheme = url.split(":", 1)[0].lower() else: scheme = None if not (url or name): # use filename as base url, with slash as path seperator name = base_url.replace("\\", "/") allowed_schemes = aggregate.config["allowedschemes"] # ignore local PHP files with execution directives local_php = (parent_content_type == 'application/x-httpd-php' and '<?' in base_url and '?>' in base_url and scheme == 'file') if local_php or (allowed_schemes and scheme not in allowed_schemes): klass = ignoreurl.IgnoreUrl else: assume_local_file = (recursion_level == 0) klass = get_urlclass_from(scheme, assume_local_file=assume_local_file) log.debug(LOG_CHECK, "%s handles url %s", klass.__name__, base_url) return klass(base_url, recursion_level, aggregate, parent_url=parent_url, base_ref=base_ref, line=line, column=column, page=page, name=name, extern=extern)
['def', 'get_url_from', '(', 'base_url', ',', 'recursion_level', ',', 'aggregate', ',', 'parent_url', '=', 'None', ',', 'base_ref', '=', 'None', ',', 'line', '=', '0', ',', 'column', '=', '0', ',', 'page', '=', '0', ',', 'name', '=', 'u""', ',', 'parent_content_type', '=', 'None', ',', 'extern', '=', 'None', ')', ':', 'if', 'base_url', 'is', 'not', 'None', ':', 'base_url', '=', 'strformat', '.', 'unicode_safe', '(', 'base_url', ')', '# left strip for detection of URL scheme', 'base_url_stripped', '=', 'base_url', '.', 'lstrip', '(', ')', 'else', ':', 'base_url_stripped', '=', 'base_url', 'if', 'parent_url', 'is', 'not', 'None', ':', 'parent_url', '=', 'strformat', '.', 'unicode_safe', '(', 'parent_url', ')', 'if', 'base_ref', 'is', 'not', 'None', ':', 'base_ref', '=', 'strformat', '.', 'unicode_safe', '(', 'base_ref', ')', 'name', '=', 'strformat', '.', 'unicode_safe', '(', 'name', ')', 'url', '=', 'absolute_url', '(', 'base_url_stripped', ',', 'base_ref', ',', 'parent_url', ')', '.', 'lower', '(', ')', 'if', '":"', 'in', 'url', ':', 'scheme', '=', 'url', '.', 'split', '(', '":"', ',', '1', ')', '[', '0', ']', '.', 'lower', '(', ')', 'else', ':', 'scheme', '=', 'None', 'if', 'not', '(', 'url', 'or', 'name', ')', ':', '# use filename as base url, with slash as path seperator', 'name', '=', 'base_url', '.', 'replace', '(', '"\\\\"', ',', '"/"', ')', 'allowed_schemes', '=', 'aggregate', '.', 'config', '[', '"allowedschemes"', ']', '# ignore local PHP files with execution directives', 'local_php', '=', '(', 'parent_content_type', '==', "'application/x-httpd-php'", 'and', "'<?'", 'in', 'base_url', 'and', "'?>'", 'in', 'base_url', 'and', 'scheme', '==', "'file'", ')', 'if', 'local_php', 'or', '(', 'allowed_schemes', 'and', 'scheme', 'not', 'in', 'allowed_schemes', ')', ':', 'klass', '=', 'ignoreurl', '.', 'IgnoreUrl', 'else', ':', 'assume_local_file', '=', '(', 'recursion_level', '==', '0', ')', 'klass', '=', 'get_urlclass_from', '(', 'scheme', ',', 'assume_local_file', '=', 'assume_local_file', ')', 'log', '.', 'debug', '(', 'LOG_CHECK', ',', '"%s handles url %s"', ',', 'klass', '.', '__name__', ',', 'base_url', ')', 'return', 'klass', '(', 'base_url', ',', 'recursion_level', ',', 'aggregate', ',', 'parent_url', '=', 'parent_url', ',', 'base_ref', '=', 'base_ref', ',', 'line', '=', 'line', ',', 'column', '=', 'column', ',', 'page', '=', 'page', ',', 'name', '=', 'name', ',', 'extern', '=', 'extern', ')']
Get url data from given base data. @param base_url: base url from a link tag @type base_url: string or None @param recursion_level: current recursion level @type recursion_level: number @param aggregate: aggregate object @type aggregate: aggregate.Consumer @param parent_url: parent url @type parent_url: string or None @param base_ref: base url from <base> tag @type base_ref string or None @param line: line number @type line: number @param column: column number @type column: number @param page: page number @type page: number @param name: link name @type name: string @param extern: (is_extern, is_strict) or None @type extern: tuple(int, int) or None
['Get', 'url', 'data', 'from', 'given', 'base', 'data', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/__init__.py#L67-L125
48
peercoin/peercoin_rpc
peercoin_rpc/peercoin_rpc.py
Client.sendtoaddress
def sendtoaddress(self, recv_addr, amount, comment=""): """send ammount to address, with optional comment. Returns txid. sendtoaddress(ADDRESS, AMMOUNT, COMMENT)""" return self.req("sendtoaddress", [recv_addr, amount, comment])
python
def sendtoaddress(self, recv_addr, amount, comment=""): """send ammount to address, with optional comment. Returns txid. sendtoaddress(ADDRESS, AMMOUNT, COMMENT)""" return self.req("sendtoaddress", [recv_addr, amount, comment])
['def', 'sendtoaddress', '(', 'self', ',', 'recv_addr', ',', 'amount', ',', 'comment', '=', '""', ')', ':', 'return', 'self', '.', 'req', '(', '"sendtoaddress"', ',', '[', 'recv_addr', ',', 'amount', ',', 'comment', ']', ')']
send ammount to address, with optional comment. Returns txid. sendtoaddress(ADDRESS, AMMOUNT, COMMENT)
['send', 'ammount', 'to', 'address', 'with', 'optional', 'comment', '.', 'Returns', 'txid', '.', 'sendtoaddress', '(', 'ADDRESS', 'AMMOUNT', 'COMMENT', ')']
train
https://github.com/peercoin/peercoin_rpc/blob/6edd854c7fd607ad9f6f4d5eb8b8b7c7fd8c16cc/peercoin_rpc/peercoin_rpc.py#L182-L185
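A hedged usage sketch for the RPC call above. The constructor arguments are illustrative assumptions rather than a verified signature, the address is a placeholder, and a running, authenticated peercoind node is assumed.

from peercoin_rpc import Client

# constructor arguments below are assumed for illustration, not a verified signature
node = Client(testnet=True, username="rpcuser", password="rpcpass")
txid = node.sendtoaddress("PExampleRecipientAddressXXXXXXXXXX", 0.5, "test payment")
print(txid)  # the node's reply, normally the transaction id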
49
tensorflow/hub
examples/image_retraining/retrain.py
variable_summaries
def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var)
python
def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var)
['def', 'variable_summaries', '(', 'var', ')', ':', 'with', 'tf', '.', 'name_scope', '(', "'summaries'", ')', ':', 'mean', '=', 'tf', '.', 'reduce_mean', '(', 'var', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'mean'", ',', 'mean', ')', 'with', 'tf', '.', 'name_scope', '(', "'stddev'", ')', ':', 'stddev', '=', 'tf', '.', 'sqrt', '(', 'tf', '.', 'reduce_mean', '(', 'tf', '.', 'square', '(', 'var', '-', 'mean', ')', ')', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'stddev'", ',', 'stddev', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'max'", ',', 'tf', '.', 'reduce_max', '(', 'var', ')', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'min'", ',', 'tf', '.', 'reduce_min', '(', 'var', ')', ')', 'tf', '.', 'summary', '.', 'histogram', '(', "'histogram'", ',', 'var', ')']
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
['Attach', 'a', 'lot', 'of', 'summaries', 'to', 'a', 'Tensor', '(', 'for', 'TensorBoard', 'visualization', ')', '.']
train
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/image_retraining/retrain.py#L709-L719
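A sketch of attaching these summaries in TensorFlow 1.x graph mode, assuming the variable_summaries helper from the row above is in scope; the variable shape and log directory are placeholders.

import tensorflow as tf  # assumes TensorFlow 1.x (tf.summary.FileWriter / Session API)

weights = tf.Variable(tf.truncated_normal([10, 5], stddev=0.1), name="weights")
variable_summaries(weights)                 # helper from the row above
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/retrain_logs", sess.graph)
    sess.run(tf.global_variables_initializer())
    summary = sess.run(merged)
    writer.add_summary(summary, global_step=0)
    writer.close()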
50
henocdz/workon
workon/script.py
WorkOn._is_unique
def _is_unique(self, name, path): """verify if there is a project with given name or path on the database """ project = None try: project = Project.select().where( (Project.name == name) | (Project.path == path) )[0] except: pass return project is None
python
def _is_unique(self, name, path): """verify if there is a project with given name or path on the database """ project = None try: project = Project.select().where( (Project.name == name) | (Project.path == path) )[0] except: pass return project is None
['def', '_is_unique', '(', 'self', ',', 'name', ',', 'path', ')', ':', 'project', '=', 'None', 'try', ':', 'project', '=', 'Project', '.', 'select', '(', ')', '.', 'where', '(', '(', 'Project', '.', 'name', '==', 'name', ')', '|', '(', 'Project', '.', 'path', '==', 'path', ')', ')', '[', '0', ']', 'except', ':', 'pass', 'return', 'project', 'is', 'None']
verify if there is a project with given name or path on the database
['verify', 'if', 'there', 'is', 'a', 'project', 'with', 'given', 'name', 'or', 'path', 'on', 'the', 'database']
train
https://github.com/henocdz/workon/blob/46f1f6dc4ea95d8efd10adf93a06737237a6874d/workon/script.py#L34-L47
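The uniqueness check above relies on a peewee OR filter across two columns. Below is a self-contained sketch of that query pattern against an in-memory SQLite database; the Project model here is a stand-in, not workon's actual model definition.

from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class Project(Model):
    name = CharField()
    path = CharField()

    class Meta:
        database = db

db.connect()
db.create_tables([Project])
Project.create(name="demo", path="/tmp/demo")

def is_unique(name, path):
    query = Project.select().where((Project.name == name) | (Project.path == path))
    return not query.exists()

print(is_unique("demo", "/elsewhere"))   # False: name already taken
print(is_unique("other", "/elsewhere"))  # True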
51
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_cifar_tpu_range
def imagetransformer_cifar_tpu_range(rhp): """Range of hyperparameters for vizier.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE) rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16]) rhp.set_discrete("hidden_size", [256, 512, 1024]) rhp.set_discrete("block_length", [128, 256, 512]) rhp.set_categorical("dec_attention_type", [ cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
python
def imagetransformer_cifar_tpu_range(rhp): """Range of hyperparameters for vizier.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE) rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16]) rhp.set_discrete("hidden_size", [256, 512, 1024]) rhp.set_discrete("block_length", [128, 256, 512]) rhp.set_categorical("dec_attention_type", [ cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
['def', 'imagetransformer_cifar_tpu_range', '(', 'rhp', ')', ':', '# After starting from base, set intervals for some parameters.', 'rhp', '.', 'set_float', '(', '"learning_rate"', ',', '0.01', ',', '1.0', ',', 'scale', '=', 'rhp', '.', 'LOG_SCALE', ')', 'rhp', '.', 'set_discrete', '(', '"num_decoder_layers"', ',', '[', '8', ',', '10', ',', '12', ',', '14', ',', '16', ']', ')', 'rhp', '.', 'set_discrete', '(', '"hidden_size"', ',', '[', '256', ',', '512', ',', '1024', ']', ')', 'rhp', '.', 'set_discrete', '(', '"block_length"', ',', '[', '128', ',', '256', ',', '512', ']', ')', 'rhp', '.', 'set_categorical', '(', '"dec_attention_type"', ',', '[', 'cia', '.', 'AttentionType', '.', 'RELATIVE_LOCAL_1D', ',', 'cia', '.', 'AttentionType', '.', 'LOCAL_1D', ']', ')']
Range of hyperparameters for vizier.
['Range', 'of', 'hyperparameters', 'for', 'vizier', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L1054-L1062
52
sethmlarson/trytravis
trytravis.py
_wait_for_travis_build
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
python
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
['def', '_wait_for_travis_build', '(', 'url', ',', 'commit', ',', 'committed_at', ')', ':', 'print', '(', "'Waiting for a Travis build to appear '", "'for `%s` after `%s`...'", '%', '(', 'commit', ',', 'committed_at', ')', ')', 'import', 'requests', 'slug', '=', '_slug_from_url', '(', 'url', ')', 'start_time', '=', 'time', '.', 'time', '(', ')', 'build_id', '=', 'None', 'while', 'time', '.', 'time', '(', ')', '-', 'start_time', '<', '60', ':', 'with', 'requests', '.', 'get', '(', "'https://api.travis-ci.org/repos/%s/builds'", '%', 'slug', ',', 'headers', '=', '_travis_headers', '(', ')', ')', 'as', 'r', ':', 'if', 'not', 'r', '.', 'ok', ':', 'raise', 'RuntimeError', '(', "'Could not reach the Travis API '", "'endpoint. Additional information: '", "'%s'", '%', 'str', '(', 'r', '.', 'content', ')', ')', '# Search through all commits and builds to find our build.', 'commit_to_sha', '=', '{', '}', 'json', '=', 'r', '.', 'json', '(', ')', 'for', 'travis_commit', 'in', 'sorted', '(', 'json', '[', "'commits'", ']', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', "'committed_at'", ']', ')', ':', 'travis_committed_at', '=', 'datetime', '.', 'datetime', '.', 'strptime', '(', 'travis_commit', '[', "'committed_at'", ']', ',', "'%Y-%m-%dT%H:%M:%SZ'", ')', '.', 'replace', '(', 'tzinfo', '=', 'utc', ')', 'if', 'travis_committed_at', '<', 'committed_at', ':', 'continue', 'commit_to_sha', '[', 'travis_commit', '[', "'id'", ']', ']', '=', 'travis_commit', '[', "'sha'", ']', 'for', 'build', 'in', 'json', '[', "'builds'", ']', ':', 'if', '(', 'build', '[', "'commit_id'", ']', 'in', 'commit_to_sha', 'and', 'commit_to_sha', '[', 'build', '[', "'commit_id'", ']', ']', '==', 'commit', ')', ':', 'build_id', '=', 'build', '[', "'id'", ']', 'print', '(', "'Travis build id: `%d`'", '%', 'build_id', ')', 'print', '(', "'Travis build URL: `https://travis-ci.org/'", "'%s/builds/%d`'", '%', '(', 'slug', ',', 'build_id', ')', ')', 'if', 'build_id', 'is', 'not', 'None', ':', 'break', 'time', '.', 'sleep', '(', '3.0', ')', 'else', ':', 'raise', 'RuntimeError', '(', "'Timed out while waiting for a Travis build '", "'to start. Is Travis configured for `%s`?'", '%', 'url', ')', 'return', 'build_id']
Waits for a Travis build to appear with the given commit SHA
['Waits', 'for', 'a', 'Travis', 'build', 'to', 'appear', 'with', 'the', 'given', 'commit', 'SHA']
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L188-L234
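The helper above uses Python's while/else so that the else branch fires only when the polling loop runs out of time without breaking, turning a missed build into a timeout error. A generic, self-contained sketch of that pattern with a fake check instead of the Travis API:

import time

def wait_for(check, timeout=5.0, interval=0.5):
    """Poll check() until it returns a non-None result or the timeout elapses."""
    start_time = time.time()
    result = None
    while time.time() - start_time < timeout:
        result = check()
        if result is not None:
            break
        time.sleep(interval)
    else:
        raise RuntimeError("Timed out while waiting")
    return result

deadline = time.time() + 1.0
print(wait_for(lambda: "build-123" if time.time() > deadline else None))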
53
nameko/nameko
nameko/cli/shell.py
make_nameko_helper
def make_nameko_helper(config): """Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage. """ module = ModuleType('nameko') module.__doc__ = """Nameko shell helper for making rpc calls and dispatching events. Usage: >>> n.rpc.service.method() "reply" >>> n.dispatch_event('service', 'event_type', 'event_data') """ proxy = ClusterRpcProxy(config) module.rpc = proxy.start() module.dispatch_event = event_dispatcher(config) module.config = config module.disconnect = proxy.stop return module
python
def make_nameko_helper(config): """Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage. """ module = ModuleType('nameko') module.__doc__ = """Nameko shell helper for making rpc calls and dispatching events. Usage: >>> n.rpc.service.method() "reply" >>> n.dispatch_event('service', 'event_type', 'event_data') """ proxy = ClusterRpcProxy(config) module.rpc = proxy.start() module.dispatch_event = event_dispatcher(config) module.config = config module.disconnect = proxy.stop return module
['def', 'make_nameko_helper', '(', 'config', ')', ':', 'module', '=', 'ModuleType', '(', "'nameko'", ')', 'module', '.', '__doc__', '=', '"""Nameko shell helper for making rpc calls and dispatching\nevents.\n\nUsage:\n >>> n.rpc.service.method()\n "reply"\n\n >>> n.dispatch_event(\'service\', \'event_type\', \'event_data\')\n"""', 'proxy', '=', 'ClusterRpcProxy', '(', 'config', ')', 'module', '.', 'rpc', '=', 'proxy', '.', 'start', '(', ')', 'module', '.', 'dispatch_event', '=', 'event_dispatcher', '(', 'config', ')', 'module', '.', 'config', '=', 'config', 'module', '.', 'disconnect', '=', 'proxy', '.', 'stop', 'return', 'module']
Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage.
['Create', 'a', 'fake', 'module', 'that', 'provides', 'some', 'convenient', 'access', 'to', 'nameko', 'standalone', 'functionality', 'for', 'interactive', 'shell', 'usage', '.']
train
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/cli/shell.py#L58-L77
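A minimal usage sketch for the make_nameko_helper record above, assuming the nameko package is installed and an AMQP broker is reachable; the broker URI, service name and method name are placeholders, not taken from the record.

from nameko.cli.shell import make_nameko_helper

config = {'AMQP_URI': 'amqp://guest:guest@localhost'}  # assumed broker location
n = make_nameko_helper(config)
try:
    reply = n.rpc.example_service.example_method()  # hypothetical service/method
    n.dispatch_event('example_service', 'event_type', {'payload': 1})
finally:
    n.disconnect()  # stops the underlying ClusterRpcProxy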
54
earwig/mwparserfromhell
mwparserfromhell/nodes/template.py
Template.add
def add(self, name, value, showkey=None, before=None, preserve_spacing=True): """Add a parameter to the template with a given *name* and *value*. *name* and *value* can be anything parsable by :func:`.utils.parse_anything`; pipes and equal signs are automatically escaped from *value* when appropriate. If *name* is already a parameter in the template, we'll replace its value. If *showkey* is given, this will determine whether or not to show the parameter's name (e.g., ``{{foo|bar}}``'s parameter has a name of ``"1"`` but it is hidden); otherwise, we'll make a safe and intelligent guess. If *before* is given (either a :class:`.Parameter` object or a name), then we will place the parameter immediately before this one. Otherwise, it will be added at the end. If *before* is a name and exists multiple times in the template, we will place it before the last occurrence. If *before* is not in the template, :exc:`ValueError` is raised. The argument is ignored if *name* is an existing parameter. If *preserve_spacing* is ``True``, we will try to preserve whitespace conventions around the parameter, whether it is new or we are updating an existing value. It is disabled for parameters with hidden keys, since MediaWiki doesn't strip whitespace in this case. """ name, value = parse_anything(name), parse_anything(value) self._surface_escape(value, "|") if self.has(name): self.remove(name, keep_field=True) existing = self.get(name) if showkey is not None: existing.showkey = showkey if not existing.showkey: self._surface_escape(value, "=") nodes = existing.value.nodes if preserve_spacing and existing.showkey: for i in range(2): # Ignore empty text nodes if not nodes[i]: nodes[i] = None existing.value = parse_anything([nodes[0], value, nodes[1]]) else: existing.value = value return existing if showkey is None: if Parameter.can_hide_key(name): int_name = int(str(name)) int_keys = set() for param in self.params: if not param.showkey: int_keys.add(int(str(param.name))) expected = min(set(range(1, len(int_keys) + 2)) - int_keys) if expected == int_name: showkey = False else: showkey = True else: showkey = True if not showkey: self._surface_escape(value, "=") if preserve_spacing and showkey: before_n, after_n = self._get_spacing_conventions(use_names=True) before_v, after_v = self._get_spacing_conventions(use_names=False) name = parse_anything([before_n, name, after_n]) value = parse_anything([before_v, value, after_v]) param = Parameter(name, value, showkey) if before: if not isinstance(before, Parameter): before = self.get(before) self.params.insert(self.params.index(before), param) else: self.params.append(param) return param
python
def add(self, name, value, showkey=None, before=None, preserve_spacing=True): """Add a parameter to the template with a given *name* and *value*. *name* and *value* can be anything parsable by :func:`.utils.parse_anything`; pipes and equal signs are automatically escaped from *value* when appropriate. If *name* is already a parameter in the template, we'll replace its value. If *showkey* is given, this will determine whether or not to show the parameter's name (e.g., ``{{foo|bar}}``'s parameter has a name of ``"1"`` but it is hidden); otherwise, we'll make a safe and intelligent guess. If *before* is given (either a :class:`.Parameter` object or a name), then we will place the parameter immediately before this one. Otherwise, it will be added at the end. If *before* is a name and exists multiple times in the template, we will place it before the last occurrence. If *before* is not in the template, :exc:`ValueError` is raised. The argument is ignored if *name* is an existing parameter. If *preserve_spacing* is ``True``, we will try to preserve whitespace conventions around the parameter, whether it is new or we are updating an existing value. It is disabled for parameters with hidden keys, since MediaWiki doesn't strip whitespace in this case. """ name, value = parse_anything(name), parse_anything(value) self._surface_escape(value, "|") if self.has(name): self.remove(name, keep_field=True) existing = self.get(name) if showkey is not None: existing.showkey = showkey if not existing.showkey: self._surface_escape(value, "=") nodes = existing.value.nodes if preserve_spacing and existing.showkey: for i in range(2): # Ignore empty text nodes if not nodes[i]: nodes[i] = None existing.value = parse_anything([nodes[0], value, nodes[1]]) else: existing.value = value return existing if showkey is None: if Parameter.can_hide_key(name): int_name = int(str(name)) int_keys = set() for param in self.params: if not param.showkey: int_keys.add(int(str(param.name))) expected = min(set(range(1, len(int_keys) + 2)) - int_keys) if expected == int_name: showkey = False else: showkey = True else: showkey = True if not showkey: self._surface_escape(value, "=") if preserve_spacing and showkey: before_n, after_n = self._get_spacing_conventions(use_names=True) before_v, after_v = self._get_spacing_conventions(use_names=False) name = parse_anything([before_n, name, after_n]) value = parse_anything([before_v, value, after_v]) param = Parameter(name, value, showkey) if before: if not isinstance(before, Parameter): before = self.get(before) self.params.insert(self.params.index(before), param) else: self.params.append(param) return param
['def', 'add', '(', 'self', ',', 'name', ',', 'value', ',', 'showkey', '=', 'None', ',', 'before', '=', 'None', ',', 'preserve_spacing', '=', 'True', ')', ':', 'name', ',', 'value', '=', 'parse_anything', '(', 'name', ')', ',', 'parse_anything', '(', 'value', ')', 'self', '.', '_surface_escape', '(', 'value', ',', '"|"', ')', 'if', 'self', '.', 'has', '(', 'name', ')', ':', 'self', '.', 'remove', '(', 'name', ',', 'keep_field', '=', 'True', ')', 'existing', '=', 'self', '.', 'get', '(', 'name', ')', 'if', 'showkey', 'is', 'not', 'None', ':', 'existing', '.', 'showkey', '=', 'showkey', 'if', 'not', 'existing', '.', 'showkey', ':', 'self', '.', '_surface_escape', '(', 'value', ',', '"="', ')', 'nodes', '=', 'existing', '.', 'value', '.', 'nodes', 'if', 'preserve_spacing', 'and', 'existing', '.', 'showkey', ':', 'for', 'i', 'in', 'range', '(', '2', ')', ':', '# Ignore empty text nodes', 'if', 'not', 'nodes', '[', 'i', ']', ':', 'nodes', '[', 'i', ']', '=', 'None', 'existing', '.', 'value', '=', 'parse_anything', '(', '[', 'nodes', '[', '0', ']', ',', 'value', ',', 'nodes', '[', '1', ']', ']', ')', 'else', ':', 'existing', '.', 'value', '=', 'value', 'return', 'existing', 'if', 'showkey', 'is', 'None', ':', 'if', 'Parameter', '.', 'can_hide_key', '(', 'name', ')', ':', 'int_name', '=', 'int', '(', 'str', '(', 'name', ')', ')', 'int_keys', '=', 'set', '(', ')', 'for', 'param', 'in', 'self', '.', 'params', ':', 'if', 'not', 'param', '.', 'showkey', ':', 'int_keys', '.', 'add', '(', 'int', '(', 'str', '(', 'param', '.', 'name', ')', ')', ')', 'expected', '=', 'min', '(', 'set', '(', 'range', '(', '1', ',', 'len', '(', 'int_keys', ')', '+', '2', ')', ')', '-', 'int_keys', ')', 'if', 'expected', '==', 'int_name', ':', 'showkey', '=', 'False', 'else', ':', 'showkey', '=', 'True', 'else', ':', 'showkey', '=', 'True', 'if', 'not', 'showkey', ':', 'self', '.', '_surface_escape', '(', 'value', ',', '"="', ')', 'if', 'preserve_spacing', 'and', 'showkey', ':', 'before_n', ',', 'after_n', '=', 'self', '.', '_get_spacing_conventions', '(', 'use_names', '=', 'True', ')', 'before_v', ',', 'after_v', '=', 'self', '.', '_get_spacing_conventions', '(', 'use_names', '=', 'False', ')', 'name', '=', 'parse_anything', '(', '[', 'before_n', ',', 'name', ',', 'after_n', ']', ')', 'value', '=', 'parse_anything', '(', '[', 'before_v', ',', 'value', ',', 'after_v', ']', ')', 'param', '=', 'Parameter', '(', 'name', ',', 'value', ',', 'showkey', ')', 'if', 'before', ':', 'if', 'not', 'isinstance', '(', 'before', ',', 'Parameter', ')', ':', 'before', '=', 'self', '.', 'get', '(', 'before', ')', 'self', '.', 'params', '.', 'insert', '(', 'self', '.', 'params', '.', 'index', '(', 'before', ')', ',', 'param', ')', 'else', ':', 'self', '.', 'params', '.', 'append', '(', 'param', ')', 'return', 'param']
Add a parameter to the template with a given *name* and *value*. *name* and *value* can be anything parsable by :func:`.utils.parse_anything`; pipes and equal signs are automatically escaped from *value* when appropriate. If *name* is already a parameter in the template, we'll replace its value. If *showkey* is given, this will determine whether or not to show the parameter's name (e.g., ``{{foo|bar}}``'s parameter has a name of ``"1"`` but it is hidden); otherwise, we'll make a safe and intelligent guess. If *before* is given (either a :class:`.Parameter` object or a name), then we will place the parameter immediately before this one. Otherwise, it will be added at the end. If *before* is a name and exists multiple times in the template, we will place it before the last occurrence. If *before* is not in the template, :exc:`ValueError` is raised. The argument is ignored if *name* is an existing parameter. If *preserve_spacing* is ``True``, we will try to preserve whitespace conventions around the parameter, whether it is new or we are updating an existing value. It is disabled for parameters with hidden keys, since MediaWiki doesn't strip whitespace in this case.
['Add', 'a', 'parameter', 'to', 'the', 'template', 'with', 'a', 'given', '*', 'name', '*', 'and', '*', 'value', '*', '.']
train
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/nodes/template.py#L232-L310
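A short sketch of Template.add in use, following the docstring above; the wikitext is made up for illustration and the printed result is approximate.

import mwparserfromhell

code = mwparserfromhell.parse("{{foo|bar|baz=1}}")
template = code.filter_templates()[0]
template.add("baz", "2")                # existing parameter: its value is replaced
template.add("qux", "x", before="baz")  # new parameter, placed before "baz"
print(code)                             # roughly {{foo|bar|qux=x|baz=2}}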
55
liamw9534/bt-manager
bt_manager/audio.py
SBCAudioCodec.register_transport_ready_event
def register_transport_ready_event(self, user_cb, user_arg): """ Register for transport ready events. The `transport ready` event is raised via a user callback. If the endpoint is configured as a source, then the user may then call :py:meth:`write_transport` in order to send data to the associated sink. Otherwise, if the endpoint is configured as a sink, then the user may call :py:meth:`read_transport` to read from the associated source instead. :param func user_cb: User defined callback function. It must take one parameter which is the user's callback argument. :param user_arg: User defined callback argument. :return: See also: :py:meth:`unregister_transport_ready_event` """ self.user_cb = user_cb self.user_arg = user_arg
python
def register_transport_ready_event(self, user_cb, user_arg): """ Register for transport ready events. The `transport ready` event is raised via a user callback. If the endpoint is configured as a source, then the user may then call :py:meth:`write_transport` in order to send data to the associated sink. Otherwise, if the endpoint is configured as a sink, then the user may call :py:meth:`read_transport` to read from the associated source instead. :param func user_cb: User defined callback function. It must take one parameter which is the user's callback argument. :param user_arg: User defined callback argument. :return: See also: :py:meth:`unregister_transport_ready_event` """ self.user_cb = user_cb self.user_arg = user_arg
['def', 'register_transport_ready_event', '(', 'self', ',', 'user_cb', ',', 'user_arg', ')', ':', 'self', '.', 'user_cb', '=', 'user_cb', 'self', '.', 'user_arg', '=', 'user_arg']
Register for transport ready events. The `transport ready` event is raised via a user callback. If the endpoint is configured as a source, then the user may then call :py:meth:`write_transport` in order to send data to the associated sink. Otherwise, if the endpoint is configured as a sink, then the user may call :py:meth:`read_transport` to read from the associated source instead. :param func user_cb: User defined callback function. It must take one parameter which is the user's callback argument. :param user_arg: User defined callback argument. :return: See also: :py:meth:`unregister_transport_ready_event`
['Register', 'for', 'transport', 'ready', 'events', '.', 'The', 'transport', 'ready', 'event', 'is', 'raised', 'via', 'a', 'user', 'callback', '.', 'If', 'the', 'endpoint', 'is', 'configured', 'as', 'a', 'source', 'then', 'the', 'user', 'may', 'then', 'call', ':', 'py', ':', 'meth', ':', 'write_transport', 'in', 'order', 'to', 'send', 'data', 'to', 'the', 'associated', 'sink', '.', 'Otherwise', 'if', 'the', 'endpoint', 'is', 'configured', 'as', 'a', 'sink', 'then', 'the', 'user', 'may', 'call', ':', 'py', ':', 'meth', ':', 'read_transport', 'to', 'read', 'from', 'the', 'associated', 'source', 'instead', '.']
train
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/audio.py#L207-L227
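A hedged sketch of registering the callback described above; `sink` stands for an already-configured SBC sink endpoint from bt_manager and is a placeholder, as is the callback body.

def on_transport_ready(arg):
    # for a sink endpoint, read audio data from the associated source
    data = sink.read_transport()
    # ... decode / play back `data` here ...

sink.register_transport_ready_event(on_transport_ready, None)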
56
LesPatamechanix/patalib
src/patalib/anomaly.py
Anomaly.generate_anomaly
def generate_anomaly(self, input_word, list_of_dict_words, num): """ Generate an anomaly. This is done via a Psuedo-random number generator. """ results = [] for i in range(0,num): index = randint(0,len(list_of_dict_words)-1) name = list_of_dict_words[index] if name != input_word and name not in results: results.append(PataLib().strip_underscore(name)) else: i = i +1 results = {'input' : input_word, 'results' : results, 'category' : 'anomaly'} return results
python
def generate_anomaly(self, input_word, list_of_dict_words, num): """ Generate an anomaly. This is done via a Psuedo-random number generator. """ results = [] for i in range(0,num): index = randint(0,len(list_of_dict_words)-1) name = list_of_dict_words[index] if name != input_word and name not in results: results.append(PataLib().strip_underscore(name)) else: i = i +1 results = {'input' : input_word, 'results' : results, 'category' : 'anomaly'} return results
['def', 'generate_anomaly', '(', 'self', ',', 'input_word', ',', 'list_of_dict_words', ',', 'num', ')', ':', 'results', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'num', ')', ':', 'index', '=', 'randint', '(', '0', ',', 'len', '(', 'list_of_dict_words', ')', '-', '1', ')', 'name', '=', 'list_of_dict_words', '[', 'index', ']', 'if', 'name', '!=', 'input_word', 'and', 'name', 'not', 'in', 'results', ':', 'results', '.', 'append', '(', 'PataLib', '(', ')', '.', 'strip_underscore', '(', 'name', ')', ')', 'else', ':', 'i', '=', 'i', '+', '1', 'results', '=', '{', "'input'", ':', 'input_word', ',', "'results'", ':', 'results', ',', "'category'", ':', "'anomaly'", '}', 'return', 'results']
Generate an anomaly. This is done via a pseudo-random number generator.
['Generate', 'an', 'anomaly', '.', 'This', 'is', 'done', 'via', 'a', 'pseudo', '-', 'random', 'number', 'generator', '.']
train
https://github.com/LesPatamechanix/patalib/blob/d88cca409b1750fdeb88cece048b308f2a710955/src/patalib/anomaly.py#L9-L23
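A small sketch of generate_anomaly, assuming the Anomaly class needs no constructor arguments and that the package installs as `patalib`; the word list is illustrative.

from patalib.anomaly import Anomaly

words = ['dog', 'fish', 'bird', 'stone', 'cloud']
result = Anomaly().generate_anomaly('cat', words, 2)
# result is shaped like:
# {'input': 'cat', 'results': [<up to two randomly chosen words>], 'category': 'anomaly'}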
57
cqlengine/cqlengine
cqlengine/query.py
AbstractQuerySet.count
def count(self): """ Returns the number of rows matched by this query """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: query = self._select_query() query.count = True result = self._execute(query) return result[0]['count'] else: return len(self._result_cache)
python
def count(self): """ Returns the number of rows matched by this query """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: query = self._select_query() query.count = True result = self._execute(query) return result[0]['count'] else: return len(self._result_cache)
['def', 'count', '(', 'self', ')', ':', 'if', 'self', '.', '_batch', ':', 'raise', 'CQLEngineException', '(', '"Only inserts, updates, and deletes are available in batch mode"', ')', 'if', 'self', '.', '_result_cache', 'is', 'None', ':', 'query', '=', 'self', '.', '_select_query', '(', ')', 'query', '.', 'count', '=', 'True', 'result', '=', 'self', '.', '_execute', '(', 'query', ')', 'return', 'result', '[', '0', ']', '[', "'count'", ']', 'else', ':', 'return', 'len', '(', 'self', '.', '_result_cache', ')']
Returns the number of rows matched by this query
['Returns', 'the', 'number', 'of', 'rows', 'matched', 'by', 'this', 'query']
train
https://github.com/cqlengine/cqlengine/blob/7079eaf7071cbf5a045e1d1ab57f6d1b5ba3f9dc/cqlengine/query.py#L568-L579
58
chaimleib/intervaltree
intervaltree/intervaltree.py
IntervalTree.remove
def remove(self, interval): """ Removes an interval from the tree, if present. If not, raises ValueError. Completes in O(log n) time. """ #self.verify() if interval not in self: #print(self.all_intervals) raise ValueError self.top_node = self.top_node.remove(interval) self.all_intervals.remove(interval) self._remove_boundaries(interval)
python
def remove(self, interval): """ Removes an interval from the tree, if present. If not, raises ValueError. Completes in O(log n) time. """ #self.verify() if interval not in self: #print(self.all_intervals) raise ValueError self.top_node = self.top_node.remove(interval) self.all_intervals.remove(interval) self._remove_boundaries(interval)
['def', 'remove', '(', 'self', ',', 'interval', ')', ':', '#self.verify()', 'if', 'interval', 'not', 'in', 'self', ':', '#print(self.all_intervals)', 'raise', 'ValueError', 'self', '.', 'top_node', '=', 'self', '.', 'top_node', '.', 'remove', '(', 'interval', ')', 'self', '.', 'all_intervals', '.', 'remove', '(', 'interval', ')', 'self', '.', '_remove_boundaries', '(', 'interval', ')']
Removes an interval from the tree, if present. If not, raises ValueError. Completes in O(log n) time.
['Removes', 'an', 'interval', 'from', 'the', 'tree', 'if', 'present', '.', 'If', 'not', 'raises', 'ValueError', '.']
train
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L356-L369
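A brief sketch of IntervalTree.remove as described in the docstring above: removing an interval that is present succeeds, removing one that is absent raises ValueError.

from intervaltree import Interval, IntervalTree

tree = IntervalTree()
tree.add(Interval(1, 5, 'a'))
tree.remove(Interval(1, 5, 'a'))  # present: removed in O(log n)
tree.remove(Interval(2, 3))       # absent: raises ValueError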
59
aws/sagemaker-python-sdk
src/sagemaker/predictor.py
RealTimePredictor.delete_model
def delete_model(self): """Deletes the Amazon SageMaker models backing this predictor. """ request_failed = False failed_models = [] for model_name in self._model_names: try: self.sagemaker_session.delete_model(model_name) except Exception: # pylint: disable=broad-except request_failed = True failed_models.append(model_name) if request_failed: raise Exception('One or more models cannot be deleted, please retry. \n' 'Failed models: {}'.format(', '.join(failed_models)))
python
def delete_model(self): """Deletes the Amazon SageMaker models backing this predictor. """ request_failed = False failed_models = [] for model_name in self._model_names: try: self.sagemaker_session.delete_model(model_name) except Exception: # pylint: disable=broad-except request_failed = True failed_models.append(model_name) if request_failed: raise Exception('One or more models cannot be deleted, please retry. \n' 'Failed models: {}'.format(', '.join(failed_models)))
['def', 'delete_model', '(', 'self', ')', ':', 'request_failed', '=', 'False', 'failed_models', '=', '[', ']', 'for', 'model_name', 'in', 'self', '.', '_model_names', ':', 'try', ':', 'self', '.', 'sagemaker_session', '.', 'delete_model', '(', 'model_name', ')', 'except', 'Exception', ':', '# pylint: disable=broad-except', 'request_failed', '=', 'True', 'failed_models', '.', 'append', '(', 'model_name', ')', 'if', 'request_failed', ':', 'raise', 'Exception', '(', "'One or more models cannot be deleted, please retry. \\n'", "'Failed models: {}'", '.', 'format', '(', "', '", '.', 'join', '(', 'failed_models', ')', ')', ')']
Deletes the Amazon SageMaker models backing this predictor.
['Deletes', 'the', 'Amazon', 'SageMaker', 'models', 'backing', 'this', 'predictor', '.']
train
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/predictor.py#L131-L146
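A hedged sketch of delete_model in a teardown sequence; it assumes valid AWS credentials and an already-deployed endpoint, and the endpoint name is a placeholder.

from sagemaker.predictor import RealTimePredictor

predictor = RealTimePredictor('my-endpoint')  # hypothetical endpoint name
predictor.delete_endpoint()                   # the endpoint is deleted separately
predictor.delete_model()                      # then remove the backing model(s)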
60
google/mobly
mobly/asserts.py
assert_raises
def assert_raises(expected_exception, extras=None, *args, **kwargs): """Assert that an exception is raised when a function is called. If no exception is raised, test fail. If an exception is raised but not of the expected type, the exception is let through. This should only be used as a context manager: with assert_raises(Exception): func() Args: expected_exception: An exception class that is expected to be raised. extras: An optional field for extra information to be included in test result. """ context = _AssertRaisesContext(expected_exception, extras=extras) return context
python
def assert_raises(expected_exception, extras=None, *args, **kwargs): """Assert that an exception is raised when a function is called. If no exception is raised, test fail. If an exception is raised but not of the expected type, the exception is let through. This should only be used as a context manager: with assert_raises(Exception): func() Args: expected_exception: An exception class that is expected to be raised. extras: An optional field for extra information to be included in test result. """ context = _AssertRaisesContext(expected_exception, extras=extras) return context
['def', 'assert_raises', '(', 'expected_exception', ',', 'extras', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'context', '=', '_AssertRaisesContext', '(', 'expected_exception', ',', 'extras', '=', 'extras', ')', 'return', 'context']
Assert that an exception is raised when a function is called. If no exception is raised, the test fails. If an exception is raised but not of the expected type, the exception is let through. This should only be used as a context manager: with assert_raises(Exception): func() Args: expected_exception: An exception class that is expected to be raised. extras: An optional field for extra information to be included in test result.
['Assert', 'that', 'an', 'exception', 'is', 'raised', 'when', 'a', 'function', 'is', 'called', '.']
train
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/asserts.py#L60-L77
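A short sketch of the context-manager usage the docstring above describes; the extras payload is illustrative.

from mobly import asserts

with asserts.assert_raises(ValueError, extras={'step': 'parsing'}):
    int('not-a-number')  # raises ValueError, so the assertion passes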
61
saltstack/salt
salt/modules/mandrill.py
_http_request
def _http_request(url, headers=None, data=None): ''' Make the HTTP request and return the body as python object. ''' if not headers: headers = _get_headers() session = requests.session() log.debug('Querying %s', url) req = session.post(url, headers=headers, data=salt.utils.json.dumps(data)) req_body = req.json() ret = _default_ret() log.debug('Status code: %d', req.status_code) log.debug('Response body:') log.debug(req_body) if req.status_code != 200: if req.status_code == 500: ret['comment'] = req_body.pop('message', '') ret['out'] = req_body return ret ret.update({ 'comment': req_body.get('error', '') }) return ret ret.update({ 'result': True, 'out': req.json() }) return ret
python
def _http_request(url, headers=None, data=None): ''' Make the HTTP request and return the body as python object. ''' if not headers: headers = _get_headers() session = requests.session() log.debug('Querying %s', url) req = session.post(url, headers=headers, data=salt.utils.json.dumps(data)) req_body = req.json() ret = _default_ret() log.debug('Status code: %d', req.status_code) log.debug('Response body:') log.debug(req_body) if req.status_code != 200: if req.status_code == 500: ret['comment'] = req_body.pop('message', '') ret['out'] = req_body return ret ret.update({ 'comment': req_body.get('error', '') }) return ret ret.update({ 'result': True, 'out': req.json() }) return ret
['def', '_http_request', '(', 'url', ',', 'headers', '=', 'None', ',', 'data', '=', 'None', ')', ':', 'if', 'not', 'headers', ':', 'headers', '=', '_get_headers', '(', ')', 'session', '=', 'requests', '.', 'session', '(', ')', 'log', '.', 'debug', '(', "'Querying %s'", ',', 'url', ')', 'req', '=', 'session', '.', 'post', '(', 'url', ',', 'headers', '=', 'headers', ',', 'data', '=', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'data', ')', ')', 'req_body', '=', 'req', '.', 'json', '(', ')', 'ret', '=', '_default_ret', '(', ')', 'log', '.', 'debug', '(', "'Status code: %d'", ',', 'req', '.', 'status_code', ')', 'log', '.', 'debug', '(', "'Response body:'", ')', 'log', '.', 'debug', '(', 'req_body', ')', 'if', 'req', '.', 'status_code', '!=', '200', ':', 'if', 'req', '.', 'status_code', '==', '500', ':', 'ret', '[', "'comment'", ']', '=', 'req_body', '.', 'pop', '(', "'message'", ',', "''", ')', 'ret', '[', "'out'", ']', '=', 'req_body', 'return', 'ret', 'ret', '.', 'update', '(', '{', "'comment'", ':', 'req_body', '.', 'get', '(', "'error'", ',', "''", ')', '}', ')', 'return', 'ret', 'ret', '.', 'update', '(', '{', "'result'", ':', 'True', ',', "'out'", ':', 'req', '.', 'json', '(', ')', '}', ')', 'return', 'ret']
Make the HTTP request and return the body as python object.
['Make', 'the', 'HTTP', 'request', 'and', 'return', 'the', 'body', 'as', 'python', 'object', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mandrill.py#L101-L132
62
saltstack/salt
salt/returners/sqlite3_return.py
save_load
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' log.debug('sqlite3 returner <save_load> called jid: %s load: %s', jid, load) conn = _get_conn(ret=None) cur = conn.cursor() sql = '''INSERT INTO jids (jid, load) VALUES (:jid, :load)''' cur.execute(sql, {'jid': jid, 'load': salt.utils.json.dumps(load)}) _close_conn(conn)
python
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' log.debug('sqlite3 returner <save_load> called jid: %s load: %s', jid, load) conn = _get_conn(ret=None) cur = conn.cursor() sql = '''INSERT INTO jids (jid, load) VALUES (:jid, :load)''' cur.execute(sql, {'jid': jid, 'load': salt.utils.json.dumps(load)}) _close_conn(conn)
['def', 'save_load', '(', 'jid', ',', 'load', ',', 'minions', '=', 'None', ')', ':', 'log', '.', 'debug', '(', "'sqlite3 returner <save_load> called jid: %s load: %s'", ',', 'jid', ',', 'load', ')', 'conn', '=', '_get_conn', '(', 'ret', '=', 'None', ')', 'cur', '=', 'conn', '.', 'cursor', '(', ')', 'sql', '=', "'''INSERT INTO jids (jid, load) VALUES (:jid, :load)'''", 'cur', '.', 'execute', '(', 'sql', ',', '{', "'jid'", ':', 'jid', ',', "'load'", ':', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'load', ')', '}', ')', '_close_conn', '(', 'conn', ')']
Save the load to the specified jid
['Save', 'the', 'load', 'to', 'the', 'specified', 'jid']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/sqlite3_return.py#L182-L193
63
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py
CommandGeneratorAction.get_presig
def get_presig(self, target, source, env, executor=None): """Return the signature contents of this action's command line. This strips $(-$) and everything in between the string, since those parts don't affect signatures. """ return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
python
def get_presig(self, target, source, env, executor=None): """Return the signature contents of this action's command line. This strips $(-$) and everything in between the string, since those parts don't affect signatures. """ return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
['def', 'get_presig', '(', 'self', ',', 'target', ',', 'source', ',', 'env', ',', 'executor', '=', 'None', ')', ':', 'return', 'self', '.', '_generate', '(', 'target', ',', 'source', ',', 'env', ',', '1', ',', 'executor', ')', '.', 'get_presig', '(', 'target', ',', 'source', ',', 'env', ')']
Return the signature contents of this action's command line. This strips $(-$) and everything in between the string, since those parts don't affect signatures.
['Return', 'the', 'signature', 'contents', 'of', 'this', 'action', 's', 'command', 'line', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py#L1043-L1049
64
cmbruns/pyopenvr
src/openvr/__init__.py
IVROverlay.getOverlayTransformAbsolute
def getOverlayTransformAbsolute(self, ulOverlayHandle): """Gets the transform if it is absolute. Returns an error if the transform is some other type.""" fn = self.function_table.getOverlayTransformAbsolute peTrackingOrigin = ETrackingUniverseOrigin() pmatTrackingOriginToOverlayTransform = HmdMatrix34_t() result = fn(ulOverlayHandle, byref(peTrackingOrigin), byref(pmatTrackingOriginToOverlayTransform)) return result, peTrackingOrigin, pmatTrackingOriginToOverlayTransform
python
def getOverlayTransformAbsolute(self, ulOverlayHandle): """Gets the transform if it is absolute. Returns an error if the transform is some other type.""" fn = self.function_table.getOverlayTransformAbsolute peTrackingOrigin = ETrackingUniverseOrigin() pmatTrackingOriginToOverlayTransform = HmdMatrix34_t() result = fn(ulOverlayHandle, byref(peTrackingOrigin), byref(pmatTrackingOriginToOverlayTransform)) return result, peTrackingOrigin, pmatTrackingOriginToOverlayTransform
['def', 'getOverlayTransformAbsolute', '(', 'self', ',', 'ulOverlayHandle', ')', ':', 'fn', '=', 'self', '.', 'function_table', '.', 'getOverlayTransformAbsolute', 'peTrackingOrigin', '=', 'ETrackingUniverseOrigin', '(', ')', 'pmatTrackingOriginToOverlayTransform', '=', 'HmdMatrix34_t', '(', ')', 'result', '=', 'fn', '(', 'ulOverlayHandle', ',', 'byref', '(', 'peTrackingOrigin', ')', ',', 'byref', '(', 'pmatTrackingOriginToOverlayTransform', ')', ')', 'return', 'result', ',', 'peTrackingOrigin', ',', 'pmatTrackingOriginToOverlayTransform']
Gets the transform if it is absolute. Returns an error if the transform is some other type.
['Gets', 'the', 'transform', 'if', 'it', 'is', 'absolute', '.', 'Returns', 'an', 'error', 'if', 'the', 'transform', 'is', 'some', 'other', 'type', '.']
train
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4852-L4859
65
gem/oq-engine
openquake/hazardlib/geo/geodetic.py
spherical_to_cartesian
def spherical_to_cartesian(lons, lats, depths=None): """ Return the position vectors (in Cartesian coordinates) of list of spherical coordinates. For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html. Parameters are components of spherical coordinates in a form of scalars, lists or numpy arrays. ``depths`` can be ``None`` in which case it's considered zero for all points. :returns: ``numpy.array`` of 3d vectors representing points' coordinates in Cartesian space in km. The array has shape `lons.shape + (3,)`. In particular, if ``lons`` and ``lats`` are scalars the result is a 3D vector and if they are vectors the result is a matrix of shape (N, 3). See also :func:`cartesian_to_spherical`. """ phi = numpy.radians(lons) theta = numpy.radians(lats) if depths is None: rr = EARTH_RADIUS else: rr = EARTH_RADIUS - numpy.array(depths) cos_theta_r = rr * numpy.cos(theta) try: shape = lons.shape except AttributeError: # a list/tuple was passed try: shape = (len(lons),) except TypeError: # a scalar was passed shape = () arr = numpy.zeros(shape + (3,)) arr[..., 0] = cos_theta_r * numpy.cos(phi) arr[..., 1] = cos_theta_r * numpy.sin(phi) arr[..., 2] = rr * numpy.sin(theta) return arr
python
def spherical_to_cartesian(lons, lats, depths=None): """ Return the position vectors (in Cartesian coordinates) of list of spherical coordinates. For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html. Parameters are components of spherical coordinates in a form of scalars, lists or numpy arrays. ``depths`` can be ``None`` in which case it's considered zero for all points. :returns: ``numpy.array`` of 3d vectors representing points' coordinates in Cartesian space in km. The array has shape `lons.shape + (3,)`. In particular, if ``lons`` and ``lats`` are scalars the result is a 3D vector and if they are vectors the result is a matrix of shape (N, 3). See also :func:`cartesian_to_spherical`. """ phi = numpy.radians(lons) theta = numpy.radians(lats) if depths is None: rr = EARTH_RADIUS else: rr = EARTH_RADIUS - numpy.array(depths) cos_theta_r = rr * numpy.cos(theta) try: shape = lons.shape except AttributeError: # a list/tuple was passed try: shape = (len(lons),) except TypeError: # a scalar was passed shape = () arr = numpy.zeros(shape + (3,)) arr[..., 0] = cos_theta_r * numpy.cos(phi) arr[..., 1] = cos_theta_r * numpy.sin(phi) arr[..., 2] = rr * numpy.sin(theta) return arr
['def', 'spherical_to_cartesian', '(', 'lons', ',', 'lats', ',', 'depths', '=', 'None', ')', ':', 'phi', '=', 'numpy', '.', 'radians', '(', 'lons', ')', 'theta', '=', 'numpy', '.', 'radians', '(', 'lats', ')', 'if', 'depths', 'is', 'None', ':', 'rr', '=', 'EARTH_RADIUS', 'else', ':', 'rr', '=', 'EARTH_RADIUS', '-', 'numpy', '.', 'array', '(', 'depths', ')', 'cos_theta_r', '=', 'rr', '*', 'numpy', '.', 'cos', '(', 'theta', ')', 'try', ':', 'shape', '=', 'lons', '.', 'shape', 'except', 'AttributeError', ':', '# a list/tuple was passed', 'try', ':', 'shape', '=', '(', 'len', '(', 'lons', ')', ',', ')', 'except', 'TypeError', ':', '# a scalar was passed', 'shape', '=', '(', ')', 'arr', '=', 'numpy', '.', 'zeros', '(', 'shape', '+', '(', '3', ',', ')', ')', 'arr', '[', '...', ',', '0', ']', '=', 'cos_theta_r', '*', 'numpy', '.', 'cos', '(', 'phi', ')', 'arr', '[', '...', ',', '1', ']', '=', 'cos_theta_r', '*', 'numpy', '.', 'sin', '(', 'phi', ')', 'arr', '[', '...', ',', '2', ']', '=', 'rr', '*', 'numpy', '.', 'sin', '(', 'theta', ')', 'return', 'arr']
Return the position vectors (in Cartesian coordinates) of list of spherical coordinates. For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html. Parameters are components of spherical coordinates in a form of scalars, lists or numpy arrays. ``depths`` can be ``None`` in which case it's considered zero for all points. :returns: ``numpy.array`` of 3d vectors representing points' coordinates in Cartesian space in km. The array has shape `lons.shape + (3,)`. In particular, if ``lons`` and ``lats`` are scalars the result is a 3D vector and if they are vectors the result is a matrix of shape (N, 3). See also :func:`cartesian_to_spherical`.
['Return', 'the', 'position', 'vectors', '(', 'in', 'Cartesian', 'coordinates', ')', 'of', 'list', 'of', 'spherical', 'coordinates', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/geodetic.py#L183-L221
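A small worked sketch of spherical_to_cartesian; the expected values are approximate and assume the library's Earth radius of roughly 6371 km.

import numpy
from openquake.hazardlib.geo.geodetic import spherical_to_cartesian

xyz = spherical_to_cartesian(0.0, 0.0, 0.0)
# scalar input -> a single 3-vector, roughly [6371., 0., 0.] (km)
grid = spherical_to_cartesian(numpy.array([0.0, 90.0]), numpy.array([0.0, 0.0]))
# array input -> shape (2, 3); depths default to zero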
66
iterative/dvc
dvc/command/metrics.py
show_metrics
def show_metrics(metrics, all_branches=False, all_tags=False): """ Args: metrics (list): Where each element is either a `list` if an xpath was specified, otherwise a `str` """ for branch, val in metrics.items(): if all_branches or all_tags: logger.info("{branch}:".format(branch=branch)) for fname, metric in val.items(): lines = metric if type(metric) is list else metric.splitlines() if len(lines) > 1: logger.info("\t{fname}:".format(fname=fname)) for line in lines: logger.info("\t\t{content}".format(content=line)) else: logger.info("\t{}: {}".format(fname, metric))
python
def show_metrics(metrics, all_branches=False, all_tags=False): """ Args: metrics (list): Where each element is either a `list` if an xpath was specified, otherwise a `str` """ for branch, val in metrics.items(): if all_branches or all_tags: logger.info("{branch}:".format(branch=branch)) for fname, metric in val.items(): lines = metric if type(metric) is list else metric.splitlines() if len(lines) > 1: logger.info("\t{fname}:".format(fname=fname)) for line in lines: logger.info("\t\t{content}".format(content=line)) else: logger.info("\t{}: {}".format(fname, metric))
['def', 'show_metrics', '(', 'metrics', ',', 'all_branches', '=', 'False', ',', 'all_tags', '=', 'False', ')', ':', 'for', 'branch', ',', 'val', 'in', 'metrics', '.', 'items', '(', ')', ':', 'if', 'all_branches', 'or', 'all_tags', ':', 'logger', '.', 'info', '(', '"{branch}:"', '.', 'format', '(', 'branch', '=', 'branch', ')', ')', 'for', 'fname', ',', 'metric', 'in', 'val', '.', 'items', '(', ')', ':', 'lines', '=', 'metric', 'if', 'type', '(', 'metric', ')', 'is', 'list', 'else', 'metric', '.', 'splitlines', '(', ')', 'if', 'len', '(', 'lines', ')', '>', '1', ':', 'logger', '.', 'info', '(', '"\\t{fname}:"', '.', 'format', '(', 'fname', '=', 'fname', ')', ')', 'for', 'line', 'in', 'lines', ':', 'logger', '.', 'info', '(', '"\\t\\t{content}"', '.', 'format', '(', 'content', '=', 'line', ')', ')', 'else', ':', 'logger', '.', 'info', '(', '"\\t{}: {}"', '.', 'format', '(', 'fname', ',', 'metric', ')', ')']
Args: metrics (list): Where each element is either a `list` if an xpath was specified, otherwise a `str`
['Args', ':', 'metrics', '(', 'list', ')', ':', 'Where', 'each', 'element', 'is', 'either', 'a', 'list', 'if', 'an', 'xpath', 'was', 'specified', 'otherwise', 'a', 'str']
train
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/command/metrics.py#L13-L33
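A hedged sketch of the input shape show_metrics expects per the docstring above; the branch name, file name and metric strings are made up, and output goes through dvc's logger, so logging must be configured for anything to be printed.

from dvc.command.metrics import show_metrics

metrics = {
    'master': {'metrics.json': ['AUC: 0.93', 'loss: 0.12']},  # list: printed line by line
    'feature': {'metrics.json': 'AUC: 0.91'},                 # plain string also accepted
}
show_metrics(metrics, all_branches=True)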
67
redbridge/molnctrl
molnctrl/cachemaker.py
monkeycache
def monkeycache(apis): """ Feed this a dictionary of api bananas, it spits out processed cache """ if isinstance(type(apis), type(None)) or apis is None: return {} verbs = set() cache = {} cache['count'] = apis['count'] cache['asyncapis'] = [] apilist = apis['api'] if apilist is None: print("[monkeycache] Server response issue, no apis found") for api in apilist: name = getvalue(api, 'name') verb, subject = splitverbsubject(name) apidict = {} apidict['name'] = name apidict['description'] = getvalue(api, 'description') apidict['isasync'] = getvalue(api, 'isasync') if apidict['isasync']: cache['asyncapis'].append(name) apidict['related'] = splitcsvstring(getvalue(api, 'related')) required = [] apiparams = [] for param in getvalue(api, 'params'): apiparam = {} apiparam['name'] = getvalue(param, 'name') apiparam['description'] = getvalue(param, 'description') apiparam['required'] = (getvalue(param, 'required') is True) apiparam['length'] = int(getvalue(param, 'length')) apiparam['type'] = getvalue(param, 'type') apiparam['related'] = splitcsvstring(getvalue(param, 'related')) if apiparam['required']: required.append(apiparam['name']) apiparams.append(apiparam) apidict['requiredparams'] = required apidict['params'] = apiparams if verb not in cache: cache[verb] = {} cache[verb][subject] = apidict verbs.add(verb) cache['verbs'] = list(verbs) return cache
python
def monkeycache(apis): """ Feed this a dictionary of api bananas, it spits out processed cache """ if isinstance(type(apis), type(None)) or apis is None: return {} verbs = set() cache = {} cache['count'] = apis['count'] cache['asyncapis'] = [] apilist = apis['api'] if apilist is None: print("[monkeycache] Server response issue, no apis found") for api in apilist: name = getvalue(api, 'name') verb, subject = splitverbsubject(name) apidict = {} apidict['name'] = name apidict['description'] = getvalue(api, 'description') apidict['isasync'] = getvalue(api, 'isasync') if apidict['isasync']: cache['asyncapis'].append(name) apidict['related'] = splitcsvstring(getvalue(api, 'related')) required = [] apiparams = [] for param in getvalue(api, 'params'): apiparam = {} apiparam['name'] = getvalue(param, 'name') apiparam['description'] = getvalue(param, 'description') apiparam['required'] = (getvalue(param, 'required') is True) apiparam['length'] = int(getvalue(param, 'length')) apiparam['type'] = getvalue(param, 'type') apiparam['related'] = splitcsvstring(getvalue(param, 'related')) if apiparam['required']: required.append(apiparam['name']) apiparams.append(apiparam) apidict['requiredparams'] = required apidict['params'] = apiparams if verb not in cache: cache[verb] = {} cache[verb][subject] = apidict verbs.add(verb) cache['verbs'] = list(verbs) return cache
['def', 'monkeycache', '(', 'apis', ')', ':', 'if', 'isinstance', '(', 'type', '(', 'apis', ')', ',', 'type', '(', 'None', ')', ')', 'or', 'apis', 'is', 'None', ':', 'return', '{', '}', 'verbs', '=', 'set', '(', ')', 'cache', '=', '{', '}', 'cache', '[', "'count'", ']', '=', 'apis', '[', "'count'", ']', 'cache', '[', "'asyncapis'", ']', '=', '[', ']', 'apilist', '=', 'apis', '[', "'api'", ']', 'if', 'apilist', 'is', 'None', ':', 'print', '(', '"[monkeycache] Server response issue, no apis found"', ')', 'for', 'api', 'in', 'apilist', ':', 'name', '=', 'getvalue', '(', 'api', ',', "'name'", ')', 'verb', ',', 'subject', '=', 'splitverbsubject', '(', 'name', ')', 'apidict', '=', '{', '}', 'apidict', '[', "'name'", ']', '=', 'name', 'apidict', '[', "'description'", ']', '=', 'getvalue', '(', 'api', ',', "'description'", ')', 'apidict', '[', "'isasync'", ']', '=', 'getvalue', '(', 'api', ',', "'isasync'", ')', 'if', 'apidict', '[', "'isasync'", ']', ':', 'cache', '[', "'asyncapis'", ']', '.', 'append', '(', 'name', ')', 'apidict', '[', "'related'", ']', '=', 'splitcsvstring', '(', 'getvalue', '(', 'api', ',', "'related'", ')', ')', 'required', '=', '[', ']', 'apiparams', '=', '[', ']', 'for', 'param', 'in', 'getvalue', '(', 'api', ',', "'params'", ')', ':', 'apiparam', '=', '{', '}', 'apiparam', '[', "'name'", ']', '=', 'getvalue', '(', 'param', ',', "'name'", ')', 'apiparam', '[', "'description'", ']', '=', 'getvalue', '(', 'param', ',', "'description'", ')', 'apiparam', '[', "'required'", ']', '=', '(', 'getvalue', '(', 'param', ',', "'required'", ')', 'is', 'True', ')', 'apiparam', '[', "'length'", ']', '=', 'int', '(', 'getvalue', '(', 'param', ',', "'length'", ')', ')', 'apiparam', '[', "'type'", ']', '=', 'getvalue', '(', 'param', ',', "'type'", ')', 'apiparam', '[', "'related'", ']', '=', 'splitcsvstring', '(', 'getvalue', '(', 'param', ',', "'related'", ')', ')', 'if', 'apiparam', '[', "'required'", ']', ':', 'required', '.', 'append', '(', 'apiparam', '[', "'name'", ']', ')', 'apiparams', '.', 'append', '(', 'apiparam', ')', 'apidict', '[', "'requiredparams'", ']', '=', 'required', 'apidict', '[', "'params'", ']', '=', 'apiparams', 'if', 'verb', 'not', 'in', 'cache', ':', 'cache', '[', 'verb', ']', '=', '{', '}', 'cache', '[', 'verb', ']', '[', 'subject', ']', '=', 'apidict', 'verbs', '.', 'add', '(', 'verb', ')', 'cache', '[', "'verbs'", ']', '=', 'list', '(', 'verbs', ')', 'return', 'cache']
Feed this a dictionary of api bananas, it spits out processed cache
['Feed', 'this', 'a', 'dictionary', 'of', 'api', 'bananas', 'it', 'spits', 'out', 'processed', 'cache']
train
https://github.com/redbridge/molnctrl/blob/9990ae7e522ce364bb61a735f774dc28de5f8e60/molnctrl/cachemaker.py#L82-L132
68
portfoliome/foil
foil/ftp.py
download_ftp_url
def download_ftp_url(source_url, target_uri, buffer_size=8192): """Uses urllib. thread safe?""" ensure_file_directory(target_uri) with urllib.request.urlopen(source_url) as source_file: with open(target_uri, 'wb') as target_file: shutil.copyfileobj(source_file, target_file, buffer_size)
python
def download_ftp_url(source_url, target_uri, buffer_size=8192): """Uses urllib. thread safe?""" ensure_file_directory(target_uri) with urllib.request.urlopen(source_url) as source_file: with open(target_uri, 'wb') as target_file: shutil.copyfileobj(source_file, target_file, buffer_size)
['def', 'download_ftp_url', '(', 'source_url', ',', 'target_uri', ',', 'buffer_size', '=', '8192', ')', ':', 'ensure_file_directory', '(', 'target_uri', ')', 'with', 'urllib', '.', 'request', '.', 'urlopen', '(', 'source_url', ')', 'as', 'source_file', ':', 'with', 'open', '(', 'target_uri', ',', "'wb'", ')', 'as', 'target_file', ':', 'shutil', '.', 'copyfileobj', '(', 'source_file', ',', 'target_file', ',', 'buffer_size', ')']
Uses urllib. thread safe?
['Uses', 'urllib', '.', 'thread', 'safe?']
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L90-L97
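A one-call sketch of download_ftp_url; the FTP URL and target path are placeholders, and the call performs real network I/O.

from foil.ftp import download_ftp_url

download_ftp_url('ftp://ftp.example.com/pub/data.csv', '/tmp/data.csv')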
69
couchbase/couchbase-python-client
couchbase/bucket.py
Bucket.map_get
def map_get(self, key, mapkey): """ Retrieve a value from a map. :param str key: The document ID :param str mapkey: Key within the map to retrieve :return: :class:`~.ValueResult` :raise: :exc:`IndexError` if the mapkey does not exist :raise: :cb_exc:`NotFoundError` if the document does not exist. .. seealso:: :meth:`map_add` for an example """ op = SD.get(mapkey) sdres = self.lookup_in(key, op) return self._wrap_dsop(sdres, True)
python
def map_get(self, key, mapkey): """ Retrieve a value from a map. :param str key: The document ID :param str mapkey: Key within the map to retrieve :return: :class:`~.ValueResult` :raise: :exc:`IndexError` if the mapkey does not exist :raise: :cb_exc:`NotFoundError` if the document does not exist. .. seealso:: :meth:`map_add` for an example """ op = SD.get(mapkey) sdres = self.lookup_in(key, op) return self._wrap_dsop(sdres, True)
['def', 'map_get', '(', 'self', ',', 'key', ',', 'mapkey', ')', ':', 'op', '=', 'SD', '.', 'get', '(', 'mapkey', ')', 'sdres', '=', 'self', '.', 'lookup_in', '(', 'key', ',', 'op', ')', 'return', 'self', '.', '_wrap_dsop', '(', 'sdres', ',', 'True', ')']
Retrieve a value from a map. :param str key: The document ID :param str mapkey: Key within the map to retrieve :return: :class:`~.ValueResult` :raise: :exc:`IndexError` if the mapkey does not exist :raise: :cb_exc:`NotFoundError` if the document does not exist. .. seealso:: :meth:`map_add` for an example
['Retrieve', 'a', 'value', 'from', 'a', 'map', '.']
train
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L2147-L2161
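A hedged sketch pairing map_get with the map_add call its docstring points to; it assumes a reachable Couchbase cluster, and the connection string, document ID and values are illustrative.

from couchbase.bucket import Bucket

bucket = Bucket('couchbase://localhost/default')
bucket.map_add('user:123', 'email', 'a@example.com', create=True)
rv = bucket.map_get('user:123', 'email')
print(rv.value)  # -> 'a@example.com'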
70
persandstrom/python-verisure
verisure/session.py
Session.set_lock_state
def set_lock_state(self, code, device_label, state): """ Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock' """ response = None try: response = requests.put( urls.set_lockstate(self._giid, device_label, state), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({"code": str(code)})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def set_lock_state(self, code, device_label, state): """ Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock' """ response = None try: response = requests.put( urls.set_lockstate(self._giid, device_label, state), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({"code": str(code)})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
['def', 'set_lock_state', '(', 'self', ',', 'code', ',', 'device_label', ',', 'state', ')', ':', 'response', '=', 'None', 'try', ':', 'response', '=', 'requests', '.', 'put', '(', 'urls', '.', 'set_lockstate', '(', 'self', '.', '_giid', ',', 'device_label', ',', 'state', ')', ',', 'headers', '=', '{', "'Accept'", ':', "'application/json, text/javascript, */*; q=0.01'", ',', "'Content-Type'", ':', "'application/json'", ',', "'Cookie'", ':', "'vid={}'", '.', 'format', '(', 'self', '.', '_vid', ')', '}', ',', 'data', '=', 'json', '.', 'dumps', '(', '{', '"code"', ':', 'str', '(', 'code', ')', '}', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'RequestException', 'as', 'ex', ':', 'raise', 'RequestError', '(', 'ex', ')', '_validate_response', '(', 'response', ')', 'return', 'json', '.', 'loads', '(', 'response', '.', 'text', ')']
Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock'
['Lock', 'or', 'unlock']
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L309-L329
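A heavily hedged sketch of set_lock_state; the credentials, lock code and device label are placeholders, and it assumes a valid Verisure account with an installed lock plus the Session login flow of python-verisure.

import verisure

session = verisure.Session('user@example.com', 'password')  # placeholder credentials
session.login()
result = session.set_lock_state('123456', 'ABCD 1234', 'lock')
# result is the decoded JSON response from the Verisure API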
71
Sanji-IO/sanji
sanji/message.py
Message.to_event
def to_event(self): """ get rid of id, sign, tunnel and update message type Notice: this method will return a deepcopy """ msg = copy.deepcopy(self) for _ in ["id", "sign", "tunnel", "query", "param"]: if not hasattr(msg, _): continue delattr(msg, _) msg._type = Message.get_message_type(msg.__dict__) return msg
python
def to_event(self): """ get rid of id, sign, tunnel and update message type Notice: this method will return a deepcopy """ msg = copy.deepcopy(self) for _ in ["id", "sign", "tunnel", "query", "param"]: if not hasattr(msg, _): continue delattr(msg, _) msg._type = Message.get_message_type(msg.__dict__) return msg
['def', 'to_event', '(', 'self', ')', ':', 'msg', '=', 'copy', '.', 'deepcopy', '(', 'self', ')', 'for', '_', 'in', '[', '"id"', ',', '"sign"', ',', '"tunnel"', ',', '"query"', ',', '"param"', ']', ':', 'if', 'not', 'hasattr', '(', 'msg', ',', '_', ')', ':', 'continue', 'delattr', '(', 'msg', ',', '_', ')', 'msg', '.', '_type', '=', 'Message', '.', 'get_message_type', '(', 'msg', '.', '__dict__', ')', 'return', 'msg']
get rid of id, sign, tunnel and update message type Notice: this method will return a deepcopy
['get', 'rid', 'of', 'id', 'sign', 'tunnel', 'and', 'update', 'message', 'type', 'Notice', ':', 'this', 'method', 'will', 'return', 'a', 'deepcopy']
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/message.py#L209-L222
72
evhub/coconut
coconut/compiler/compiler.py
Compiler.destructuring_stmt_handle
def destructuring_stmt_handle(self, original, loc, tokens): """Process match assign blocks.""" internal_assert(len(tokens) == 2, "invalid destructuring assignment tokens", tokens) matches, item = tokens out = match_handle(loc, [matches, "in", item, None]) out += self.pattern_error(original, loc, match_to_var, match_check_var) return out
python
def destructuring_stmt_handle(self, original, loc, tokens): """Process match assign blocks.""" internal_assert(len(tokens) == 2, "invalid destructuring assignment tokens", tokens) matches, item = tokens out = match_handle(loc, [matches, "in", item, None]) out += self.pattern_error(original, loc, match_to_var, match_check_var) return out
['def', 'destructuring_stmt_handle', '(', 'self', ',', 'original', ',', 'loc', ',', 'tokens', ')', ':', 'internal_assert', '(', 'len', '(', 'tokens', ')', '==', '2', ',', '"invalid destructuring assignment tokens"', ',', 'tokens', ')', 'matches', ',', 'item', '=', 'tokens', 'out', '=', 'match_handle', '(', 'loc', ',', '[', 'matches', ',', '"in"', ',', 'item', ',', 'None', ']', ')', 'out', '+=', 'self', '.', 'pattern_error', '(', 'original', ',', 'loc', ',', 'match_to_var', ',', 'match_check_var', ')', 'return', 'out']
Process match assign blocks.
['Process', 'match', 'assign', 'blocks', '.']
train
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1424-L1430
73
Erotemic/utool
utool/util_alg.py
knapsack_iterative_int
def knapsack_iterative_int(items, maxweight): r""" Iterative knapsack method Math: maximize \sum_{i \in T} v_i subject to \sum_{i \in T} w_i \leq W Notes: dpmat is the dynamic programming memoization matrix. dpmat[i, w] is the total value of the items with weight at most W T is idx_subset, the set of indicies in the optimal solution CommandLine: python -m utool.util_alg --exec-knapsack_iterative_int --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> weights = [1, 3, 3, 5, 2, 1] * 2 >>> items = [(w, w, i) for i, w in enumerate(weights)] >>> maxweight = 10 >>> items = [(.8, 700, 0)] >>> maxweight = 2000 >>> print('maxweight = %r' % (maxweight,)) >>> print('items = %r' % (items,)) >>> total_value, items_subset = knapsack_iterative_int(items, maxweight) >>> total_weight = sum([t[1] for t in items_subset]) >>> print('total_weight = %r' % (total_weight,)) >>> print('items_subset = %r' % (items_subset,)) >>> result = 'total_value = %.2f' % (total_value,) >>> print(result) total_value = 0.80 Ignore: DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))] KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))] """ values = [t[0] for t in items] weights = [t[1] for t in items] maxsize = maxweight + 1 # Sparse representation seems better dpmat = defaultdict(lambda: defaultdict(lambda: np.inf)) kmat = defaultdict(lambda: defaultdict(lambda: False)) idx_subset = [] # NOQA for w in range(maxsize): dpmat[0][w] = 0 # For each item consider to include it or not for idx in range(len(items)): item_val = values[idx] item_weight = weights[idx] # consider at each possible bag size for w in range(maxsize): valid_item = item_weight <= w if idx > 0: prev_val = dpmat[idx - 1][w] prev_noitem_val = dpmat[idx - 1][w - item_weight] else: prev_val = 0 prev_noitem_val = 0 withitem_val = item_val + prev_noitem_val more_valuable = withitem_val > prev_val if valid_item and more_valuable: dpmat[idx][w] = withitem_val kmat[idx][w] = True else: dpmat[idx][w] = prev_val kmat[idx][w] = False # Trace backwards to get the items used in the solution K = maxweight for idx in reversed(range(len(items))): if kmat[idx][K]: idx_subset.append(idx) K = K - weights[idx] idx_subset = sorted(idx_subset) items_subset = [items[i] for i in idx_subset] total_value = dpmat[len(items) - 1][maxweight] return total_value, items_subset
python
def knapsack_iterative_int(items, maxweight): r""" Iterative knapsack method Math: maximize \sum_{i \in T} v_i subject to \sum_{i \in T} w_i \leq W Notes: dpmat is the dynamic programming memoization matrix. dpmat[i, w] is the total value of the items with weight at most W T is idx_subset, the set of indicies in the optimal solution CommandLine: python -m utool.util_alg --exec-knapsack_iterative_int --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> weights = [1, 3, 3, 5, 2, 1] * 2 >>> items = [(w, w, i) for i, w in enumerate(weights)] >>> maxweight = 10 >>> items = [(.8, 700, 0)] >>> maxweight = 2000 >>> print('maxweight = %r' % (maxweight,)) >>> print('items = %r' % (items,)) >>> total_value, items_subset = knapsack_iterative_int(items, maxweight) >>> total_weight = sum([t[1] for t in items_subset]) >>> print('total_weight = %r' % (total_weight,)) >>> print('items_subset = %r' % (items_subset,)) >>> result = 'total_value = %.2f' % (total_value,) >>> print(result) total_value = 0.80 Ignore: DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))] KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))] """ values = [t[0] for t in items] weights = [t[1] for t in items] maxsize = maxweight + 1 # Sparse representation seems better dpmat = defaultdict(lambda: defaultdict(lambda: np.inf)) kmat = defaultdict(lambda: defaultdict(lambda: False)) idx_subset = [] # NOQA for w in range(maxsize): dpmat[0][w] = 0 # For each item consider to include it or not for idx in range(len(items)): item_val = values[idx] item_weight = weights[idx] # consider at each possible bag size for w in range(maxsize): valid_item = item_weight <= w if idx > 0: prev_val = dpmat[idx - 1][w] prev_noitem_val = dpmat[idx - 1][w - item_weight] else: prev_val = 0 prev_noitem_val = 0 withitem_val = item_val + prev_noitem_val more_valuable = withitem_val > prev_val if valid_item and more_valuable: dpmat[idx][w] = withitem_val kmat[idx][w] = True else: dpmat[idx][w] = prev_val kmat[idx][w] = False # Trace backwards to get the items used in the solution K = maxweight for idx in reversed(range(len(items))): if kmat[idx][K]: idx_subset.append(idx) K = K - weights[idx] idx_subset = sorted(idx_subset) items_subset = [items[i] for i in idx_subset] total_value = dpmat[len(items) - 1][maxweight] return total_value, items_subset
['def', 'knapsack_iterative_int', '(', 'items', ',', 'maxweight', ')', ':', 'values', '=', '[', 't', '[', '0', ']', 'for', 't', 'in', 'items', ']', 'weights', '=', '[', 't', '[', '1', ']', 'for', 't', 'in', 'items', ']', 'maxsize', '=', 'maxweight', '+', '1', '# Sparse representation seems better', 'dpmat', '=', 'defaultdict', '(', 'lambda', ':', 'defaultdict', '(', 'lambda', ':', 'np', '.', 'inf', ')', ')', 'kmat', '=', 'defaultdict', '(', 'lambda', ':', 'defaultdict', '(', 'lambda', ':', 'False', ')', ')', 'idx_subset', '=', '[', ']', '# NOQA', 'for', 'w', 'in', 'range', '(', 'maxsize', ')', ':', 'dpmat', '[', '0', ']', '[', 'w', ']', '=', '0', '# For each item consider to include it or not', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'items', ')', ')', ':', 'item_val', '=', 'values', '[', 'idx', ']', 'item_weight', '=', 'weights', '[', 'idx', ']', '# consider at each possible bag size', 'for', 'w', 'in', 'range', '(', 'maxsize', ')', ':', 'valid_item', '=', 'item_weight', '<=', 'w', 'if', 'idx', '>', '0', ':', 'prev_val', '=', 'dpmat', '[', 'idx', '-', '1', ']', '[', 'w', ']', 'prev_noitem_val', '=', 'dpmat', '[', 'idx', '-', '1', ']', '[', 'w', '-', 'item_weight', ']', 'else', ':', 'prev_val', '=', '0', 'prev_noitem_val', '=', '0', 'withitem_val', '=', 'item_val', '+', 'prev_noitem_val', 'more_valuable', '=', 'withitem_val', '>', 'prev_val', 'if', 'valid_item', 'and', 'more_valuable', ':', 'dpmat', '[', 'idx', ']', '[', 'w', ']', '=', 'withitem_val', 'kmat', '[', 'idx', ']', '[', 'w', ']', '=', 'True', 'else', ':', 'dpmat', '[', 'idx', ']', '[', 'w', ']', '=', 'prev_val', 'kmat', '[', 'idx', ']', '[', 'w', ']', '=', 'False', '# Trace backwards to get the items used in the solution', 'K', '=', 'maxweight', 'for', 'idx', 'in', 'reversed', '(', 'range', '(', 'len', '(', 'items', ')', ')', ')', ':', 'if', 'kmat', '[', 'idx', ']', '[', 'K', ']', ':', 'idx_subset', '.', 'append', '(', 'idx', ')', 'K', '=', 'K', '-', 'weights', '[', 'idx', ']', 'idx_subset', '=', 'sorted', '(', 'idx_subset', ')', 'items_subset', '=', '[', 'items', '[', 'i', ']', 'for', 'i', 'in', 'idx_subset', ']', 'total_value', '=', 'dpmat', '[', 'len', '(', 'items', ')', '-', '1', ']', '[', 'maxweight', ']', 'return', 'total_value', ',', 'items_subset']
r""" Iterative knapsack method Math: maximize \sum_{i \in T} v_i subject to \sum_{i \in T} w_i \leq W Notes: dpmat is the dynamic programming memoization matrix. dpmat[i, w] is the total value of the items with weight at most W T is idx_subset, the set of indicies in the optimal solution CommandLine: python -m utool.util_alg --exec-knapsack_iterative_int --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> weights = [1, 3, 3, 5, 2, 1] * 2 >>> items = [(w, w, i) for i, w in enumerate(weights)] >>> maxweight = 10 >>> items = [(.8, 700, 0)] >>> maxweight = 2000 >>> print('maxweight = %r' % (maxweight,)) >>> print('items = %r' % (items,)) >>> total_value, items_subset = knapsack_iterative_int(items, maxweight) >>> total_weight = sum([t[1] for t in items_subset]) >>> print('total_weight = %r' % (total_weight,)) >>> print('items_subset = %r' % (items_subset,)) >>> result = 'total_value = %.2f' % (total_value,) >>> print(result) total_value = 0.80 Ignore: DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))] KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))]
['r', 'Iterative', 'knapsack', 'method']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1400-L1477
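A minimal, self-contained sketch of the same 0/1 knapsack recurrence used in the record above (not the utool implementation itself), assuming non-negative integer weights and hypothetical (value, weight) item pairs; it shows the table fill and the backwards trace that recovers the chosen subset:

def knapsack_01(items, maxweight):
    # items: list of (value, weight) pairs with non-negative integer weights
    n = len(items)
    # dp[w] holds the best value achievable with capacity w (1-D rolling table)
    dp = [0] * (maxweight + 1)
    keep = [[False] * (maxweight + 1) for _ in range(n)]
    for i, (value, weight) in enumerate(items):
        # iterate capacities downwards so each item is used at most once
        for w in range(maxweight, weight - 1, -1):
            if dp[w - weight] + value > dp[w]:
                dp[w] = dp[w - weight] + value
                keep[i][w] = True
    # trace backwards to recover the chosen item indices
    w, chosen = maxweight, []
    for i in range(n - 1, -1, -1):
        if keep[i][w]:
            chosen.append(i)
            w -= items[i][1]
    return dp[maxweight], sorted(chosen)

print(knapsack_01([(60, 1), (100, 2), (120, 3)], 5))  # -> (220, [1, 2])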
74
jorgenschaefer/elpy
elpy/server.py
ElpyRPCServer.rpc_get_names
def rpc_get_names(self, filename, source, offset): """Get all possible names """ source = get_source(source) if hasattr(self.backend, "rpc_get_names"): return self.backend.rpc_get_names(filename, source, offset) else: raise Fault("get_names not implemented by current backend", code=400)
python
def rpc_get_names(self, filename, source, offset): """Get all possible names """ source = get_source(source) if hasattr(self.backend, "rpc_get_names"): return self.backend.rpc_get_names(filename, source, offset) else: raise Fault("get_names not implemented by current backend", code=400)
['def', 'rpc_get_names', '(', 'self', ',', 'filename', ',', 'source', ',', 'offset', ')', ':', 'source', '=', 'get_source', '(', 'source', ')', 'if', 'hasattr', '(', 'self', '.', 'backend', ',', '"rpc_get_names"', ')', ':', 'return', 'self', '.', 'backend', '.', 'rpc_get_names', '(', 'filename', ',', 'source', ',', 'offset', ')', 'else', ':', 'raise', 'Fault', '(', '"get_names not implemented by current backend"', ',', 'code', '=', '400', ')']
Get all possible names
['Get', 'all', 'possible', 'names']
train
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L199-L208
75
praekeltfoundation/seaworthy
seaworthy/helpers.py
fetch_image
def fetch_image(client, name): """ Fetch an image if it isn't already present. This works like ``docker pull`` and will pull the tag ``latest`` if no tag is specified in the image name. """ try: image = client.images.get(name) except docker.errors.ImageNotFound: name, tag = _parse_image_tag(name) tag = 'latest' if tag is None else tag log.info("Pulling tag '{}' for image '{}'...".format(tag, name)) image = client.images.pull(name, tag=tag) log.debug("Found image '{}' for tag '{}'".format(image.id, name)) return image
python
def fetch_image(client, name): """ Fetch an image if it isn't already present. This works like ``docker pull`` and will pull the tag ``latest`` if no tag is specified in the image name. """ try: image = client.images.get(name) except docker.errors.ImageNotFound: name, tag = _parse_image_tag(name) tag = 'latest' if tag is None else tag log.info("Pulling tag '{}' for image '{}'...".format(tag, name)) image = client.images.pull(name, tag=tag) log.debug("Found image '{}' for tag '{}'".format(image.id, name)) return image
['def', 'fetch_image', '(', 'client', ',', 'name', ')', ':', 'try', ':', 'image', '=', 'client', '.', 'images', '.', 'get', '(', 'name', ')', 'except', 'docker', '.', 'errors', '.', 'ImageNotFound', ':', 'name', ',', 'tag', '=', '_parse_image_tag', '(', 'name', ')', 'tag', '=', "'latest'", 'if', 'tag', 'is', 'None', 'else', 'tag', 'log', '.', 'info', '(', '"Pulling tag \'{}\' for image \'{}\'..."', '.', 'format', '(', 'tag', ',', 'name', ')', ')', 'image', '=', 'client', '.', 'images', '.', 'pull', '(', 'name', ',', 'tag', '=', 'tag', ')', 'log', '.', 'debug', '(', '"Found image \'{}\' for tag \'{}\'"', '.', 'format', '(', 'image', '.', 'id', ',', 'name', ')', ')', 'return', 'image']
Fetch an image if it isn't already present. This works like ``docker pull`` and will pull the tag ``latest`` if no tag is specified in the image name.
['Fetch', 'an', 'image', 'if', 'it', 'isn', 't', 'already', 'present', '.']
train
https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/helpers.py#L27-L44
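A hedged usage sketch for the helper above, assuming a running local Docker daemon and the docker SDK; the image name 'alpine' is an arbitrary example:

import docker
from seaworthy.helpers import fetch_image

client = docker.from_env()
# Pulls "alpine:latest" only if it is not already cached locally.
image = fetch_image(client, 'alpine')
print(image.tags)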
76
F5Networks/f5-common-python
f5/bigip/tm/ltm/pool.py
Members.update
def update(self, **kwargs): """Call this to change the configuration of the service on the device. This method uses HTTP PUT to alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * If ``fqdn`` is in the kwargs or set as an attribute, removes the ``autopopulate`` and ``addressFamily`` keys from it. :param kwargs: keys and associated values to alter on the device """ checked = self._check_member_parameters(**kwargs) return super(Members, self)._update(**checked)
python
def update(self, **kwargs): """Call this to change the configuration of the service on the device. This method uses HTTP PUT to alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * If ``fqdn`` is in the kwargs or set as an attribute, removes the ``autopopulate`` and ``addressFamily`` keys from it. :param kwargs: keys and associated values to alter on the device """ checked = self._check_member_parameters(**kwargs) return super(Members, self)._update(**checked)
['def', 'update', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'checked', '=', 'self', '.', '_check_member_parameters', '(', '*', '*', 'kwargs', ')', 'return', 'super', '(', 'Members', ',', 'self', ')', '.', '_update', '(', '*', '*', 'checked', ')']
Call this to change the configuration of the service on the device. This method uses HTTP PUT to alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * If ``fqdn`` is in the kwargs or set as an attribute, removes the ``autopopulate`` and ``addressFamily`` keys from it. :param kwargs: keys and associated values to alter on the device
['Call', 'this', 'to', 'change', 'the', 'configuration', 'of', 'the', 'service', 'on', 'the', 'device', '.']
train
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/ltm/pool.py#L144-L160
77
ethereum/py-evm
eth/estimators/gas.py
binary_gas_search
def binary_gas_search(state: BaseState, transaction: BaseTransaction, tolerance: int=1) -> int: """ Run the transaction with various gas limits, progressively approaching the minimum needed to succeed without an OutOfGas exception. The starting range of possible estimates is: [transaction.intrinsic_gas, state.gas_limit]. After the first OutOfGas exception, the range is: (largest_limit_out_of_gas, state.gas_limit]. After the first run not out of gas, the range is: (largest_limit_out_of_gas, smallest_success]. :param int tolerance: When the range of estimates is less than tolerance, return the top of the range. :returns int: The smallest confirmed gas to not throw an OutOfGas exception, subject to tolerance. If OutOfGas is thrown at block limit, return block limit. :raises VMError: if the computation fails even when given the block gas_limit to complete """ if not hasattr(transaction, 'sender'): raise TypeError( "Transaction is missing attribute sender.", "If sending an unsigned transaction, use SpoofTransaction and provide the", "sender using the 'from' parameter") minimum_transaction = SpoofTransaction( transaction, gas=transaction.intrinsic_gas, gas_price=0, ) if _get_computation_error(state, minimum_transaction) is None: return transaction.intrinsic_gas maximum_transaction = SpoofTransaction( transaction, gas=state.gas_limit, gas_price=0, ) error = _get_computation_error(state, maximum_transaction) if error is not None: raise error minimum_viable = state.gas_limit maximum_out_of_gas = transaction.intrinsic_gas while minimum_viable - maximum_out_of_gas > tolerance: midpoint = (minimum_viable + maximum_out_of_gas) // 2 test_transaction = SpoofTransaction(transaction, gas=midpoint) if _get_computation_error(state, test_transaction) is None: minimum_viable = midpoint else: maximum_out_of_gas = midpoint return minimum_viable
python
def binary_gas_search(state: BaseState, transaction: BaseTransaction, tolerance: int=1) -> int: """ Run the transaction with various gas limits, progressively approaching the minimum needed to succeed without an OutOfGas exception. The starting range of possible estimates is: [transaction.intrinsic_gas, state.gas_limit]. After the first OutOfGas exception, the range is: (largest_limit_out_of_gas, state.gas_limit]. After the first run not out of gas, the range is: (largest_limit_out_of_gas, smallest_success]. :param int tolerance: When the range of estimates is less than tolerance, return the top of the range. :returns int: The smallest confirmed gas to not throw an OutOfGas exception, subject to tolerance. If OutOfGas is thrown at block limit, return block limit. :raises VMError: if the computation fails even when given the block gas_limit to complete """ if not hasattr(transaction, 'sender'): raise TypeError( "Transaction is missing attribute sender.", "If sending an unsigned transaction, use SpoofTransaction and provide the", "sender using the 'from' parameter") minimum_transaction = SpoofTransaction( transaction, gas=transaction.intrinsic_gas, gas_price=0, ) if _get_computation_error(state, minimum_transaction) is None: return transaction.intrinsic_gas maximum_transaction = SpoofTransaction( transaction, gas=state.gas_limit, gas_price=0, ) error = _get_computation_error(state, maximum_transaction) if error is not None: raise error minimum_viable = state.gas_limit maximum_out_of_gas = transaction.intrinsic_gas while minimum_viable - maximum_out_of_gas > tolerance: midpoint = (minimum_viable + maximum_out_of_gas) // 2 test_transaction = SpoofTransaction(transaction, gas=midpoint) if _get_computation_error(state, test_transaction) is None: minimum_viable = midpoint else: maximum_out_of_gas = midpoint return minimum_viable
['def', 'binary_gas_search', '(', 'state', ':', 'BaseState', ',', 'transaction', ':', 'BaseTransaction', ',', 'tolerance', ':', 'int', '=', '1', ')', '->', 'int', ':', 'if', 'not', 'hasattr', '(', 'transaction', ',', "'sender'", ')', ':', 'raise', 'TypeError', '(', '"Transaction is missing attribute sender."', ',', '"If sending an unsigned transaction, use SpoofTransaction and provide the"', ',', '"sender using the \'from\' parameter"', ')', 'minimum_transaction', '=', 'SpoofTransaction', '(', 'transaction', ',', 'gas', '=', 'transaction', '.', 'intrinsic_gas', ',', 'gas_price', '=', '0', ',', ')', 'if', '_get_computation_error', '(', 'state', ',', 'minimum_transaction', ')', 'is', 'None', ':', 'return', 'transaction', '.', 'intrinsic_gas', 'maximum_transaction', '=', 'SpoofTransaction', '(', 'transaction', ',', 'gas', '=', 'state', '.', 'gas_limit', ',', 'gas_price', '=', '0', ',', ')', 'error', '=', '_get_computation_error', '(', 'state', ',', 'maximum_transaction', ')', 'if', 'error', 'is', 'not', 'None', ':', 'raise', 'error', 'minimum_viable', '=', 'state', '.', 'gas_limit', 'maximum_out_of_gas', '=', 'transaction', '.', 'intrinsic_gas', 'while', 'minimum_viable', '-', 'maximum_out_of_gas', '>', 'tolerance', ':', 'midpoint', '=', '(', 'minimum_viable', '+', 'maximum_out_of_gas', ')', '//', '2', 'test_transaction', '=', 'SpoofTransaction', '(', 'transaction', ',', 'gas', '=', 'midpoint', ')', 'if', '_get_computation_error', '(', 'state', ',', 'test_transaction', ')', 'is', 'None', ':', 'minimum_viable', '=', 'midpoint', 'else', ':', 'maximum_out_of_gas', '=', 'midpoint', 'return', 'minimum_viable']
Run the transaction with various gas limits, progressively approaching the minimum needed to succeed without an OutOfGas exception. The starting range of possible estimates is: [transaction.intrinsic_gas, state.gas_limit]. After the first OutOfGas exception, the range is: (largest_limit_out_of_gas, state.gas_limit]. After the first run not out of gas, the range is: (largest_limit_out_of_gas, smallest_success]. :param int tolerance: When the range of estimates is less than tolerance, return the top of the range. :returns int: The smallest confirmed gas to not throw an OutOfGas exception, subject to tolerance. If OutOfGas is thrown at block limit, return block limit. :raises VMError: if the computation fails even when given the block gas_limit to complete
['Run', 'the', 'transaction', 'with', 'various', 'gas', 'limits', 'progressively', 'approaching', 'the', 'minimum', 'needed', 'to', 'succeed', 'without', 'an', 'OutOfGas', 'exception', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/estimators/gas.py#L28-L78
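The bisection strategy described above can be sketched independently of py-evm by replacing transaction execution with a hypothetical boolean succeeds(gas) oracle; the invariant is that the lower bound always fails and the upper bound always succeeds:

def bisect_min_gas(succeeds, intrinsic_gas, block_gas_limit, tolerance=1):
    if succeeds(intrinsic_gas):
        return intrinsic_gas
    if not succeeds(block_gas_limit):
        raise RuntimeError('fails even at the block gas limit')
    lo, hi = intrinsic_gas, block_gas_limit  # lo always fails, hi always succeeds
    while hi - lo > tolerance:
        mid = (lo + hi) // 2
        if succeeds(mid):
            hi = mid
        else:
            lo = mid
    return hi

# Example with a pretend minimum of 53000 gas:
print(bisect_min_gas(lambda gas: gas >= 53000, 21000, 8000000))  # -> 53000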
78
SatelliteQE/nailgun
nailgun/entities.py
ContentViewPuppetModule.read
def read(self, entity=None, attrs=None, ignore=None, params=None): """Provide a default value for ``entity``. By default, ``nailgun.entity_mixins.EntityReadMixin.read provides a default value for ``entity`` like so:: entity = type(self)() However, :class:`ContentViewPuppetModule` requires that an ``content_view`` be provided, so this technique will not work. Do this instead:: entity = type(self)(content_view=self.content_view.id) """ # read() should not change the state of the object it's called on, but # super() alters the attributes of any entity passed in. Creating a new # object and passing it to super() lets this one avoid changing state. if entity is None: entity = type(self)( self._server_config, content_view=self.content_view, # pylint:disable=no-member ) if ignore is None: ignore = set() ignore.add('content_view') return super(ContentViewPuppetModule, self).read( entity, attrs, ignore, params)
python
def read(self, entity=None, attrs=None, ignore=None, params=None): """Provide a default value for ``entity``. By default, ``nailgun.entity_mixins.EntityReadMixin.read provides a default value for ``entity`` like so:: entity = type(self)() However, :class:`ContentViewPuppetModule` requires that an ``content_view`` be provided, so this technique will not work. Do this instead:: entity = type(self)(content_view=self.content_view.id) """ # read() should not change the state of the object it's called on, but # super() alters the attributes of any entity passed in. Creating a new # object and passing it to super() lets this one avoid changing state. if entity is None: entity = type(self)( self._server_config, content_view=self.content_view, # pylint:disable=no-member ) if ignore is None: ignore = set() ignore.add('content_view') return super(ContentViewPuppetModule, self).read( entity, attrs, ignore, params)
['def', 'read', '(', 'self', ',', 'entity', '=', 'None', ',', 'attrs', '=', 'None', ',', 'ignore', '=', 'None', ',', 'params', '=', 'None', ')', ':', "# read() should not change the state of the object it's called on, but", '# super() alters the attributes of any entity passed in. Creating a new', '# object and passing it to super() lets this one avoid changing state.', 'if', 'entity', 'is', 'None', ':', 'entity', '=', 'type', '(', 'self', ')', '(', 'self', '.', '_server_config', ',', 'content_view', '=', 'self', '.', 'content_view', ',', '# pylint:disable=no-member', ')', 'if', 'ignore', 'is', 'None', ':', 'ignore', '=', 'set', '(', ')', 'ignore', '.', 'add', '(', "'content_view'", ')', 'return', 'super', '(', 'ContentViewPuppetModule', ',', 'self', ')', '.', 'read', '(', 'entity', ',', 'attrs', ',', 'ignore', ',', 'params', ')']
Provide a default value for ``entity``. By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a default value for ``entity`` like so:: entity = type(self)() However, :class:`ContentViewPuppetModule` requires that a ``content_view`` be provided, so this technique will not work. Do this instead:: entity = type(self)(content_view=self.content_view.id)
['Provide', 'a', 'default', 'value', 'for', 'entity', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L2453-L2480

79
jordanh/neurio-python
neurio/__init__.py
Client.get_samples_live_last
def get_samples_live_last(self, sensor_id): """Get the last sample recorded by the sensor. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` Returns: list: dictionary objects containing sample data """ url = "https://api.neur.io/v1/samples/live/last" headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "sensorId": sensor_id } url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
python
def get_samples_live_last(self, sensor_id): """Get the last sample recorded by the sensor. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` Returns: list: dictionary objects containing sample data """ url = "https://api.neur.io/v1/samples/live/last" headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "sensorId": sensor_id } url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
['def', 'get_samples_live_last', '(', 'self', ',', 'sensor_id', ')', ':', 'url', '=', '"https://api.neur.io/v1/samples/live/last"', 'headers', '=', 'self', '.', '__gen_headers', '(', ')', 'headers', '[', '"Content-Type"', ']', '=', '"application/json"', 'params', '=', '{', '"sensorId"', ':', 'sensor_id', '}', 'url', '=', 'self', '.', '__append_url_params', '(', 'url', ',', 'params', ')', 'r', '=', 'requests', '.', 'get', '(', 'url', ',', 'headers', '=', 'headers', ')', 'return', 'r', '.', 'json', '(', ')']
Get the last sample recorded by the sensor. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` Returns: list: dictionary objects containing sample data
['Get', 'the', 'last', 'sample', 'recorded', 'by', 'the', 'sensor', '.']
train
https://github.com/jordanh/neurio-python/blob/3a1bcadadb3bb3ad48f2df41c039d8b828ffd9c8/neurio/__init__.py#L439-L458
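The method above reduces to a single authenticated GET; a raw-requests sketch of the same call follows, where the bearer-token Authorization header is an assumption and the sensor id is the placeholder from the docstring:

import requests

token = 'YOUR_ACCESS_TOKEN'  # placeholder credential
resp = requests.get(
    'https://api.neur.io/v1/samples/live/last',
    headers={'Authorization': 'Bearer ' + token,
             'Content-Type': 'application/json'},
    params={'sensorId': '0x0013A20040B65FAD'},
)
print(resp.json())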
80
markovmodel/PyEMMA
pyemma/plots/plots2d.py
plot_contour
def plot_contour( xall, yall, zall, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None, levels=None, cbar=True, cax=None, cbar_label=None, cbar_orientation='vertical', norm=None, nbins=100, method='nearest', mask=False, **kwargs): """Plot a two-dimensional contour map by interpolating scattered data on a grid. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot; use legacy style calculation if 'legacy'. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. nbins : int, optional, default=100 Number of grid points used in each dimension. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. mask : boolean, optional, default=False Hide unsampled areas is True. Optional parameters for contourf (**kwargs) ------------------------------------------- corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. 
It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. """ x, y, z = get_grid_data( xall, yall, zall, nbins=nbins, method=method) if vmin is None: vmin = _np.min(zall[zall > -_np.inf]) if vmax is None: vmax = _np.max(zall[zall < _np.inf]) if levels == 'legacy': eps = (vmax - vmin) / float(ncontours) levels = _np.linspace(vmin - eps, vmax + eps) if mask: _, _, counts = get_histogram( xall, yall, nbins=nbins, weights=None, avoid_zero_count=None) z = _np.ma.masked_where(counts.T <= 0, z) return plot_map( x, y, z, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels, cbar=cbar, cax=cax, cbar_label=cbar_label, cbar_orientation=cbar_orientation, norm=norm, **kwargs)
python
def plot_contour( xall, yall, zall, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None, levels=None, cbar=True, cax=None, cbar_label=None, cbar_orientation='vertical', norm=None, nbins=100, method='nearest', mask=False, **kwargs): """Plot a two-dimensional contour map by interpolating scattered data on a grid. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot; use legacy style calculation if 'legacy'. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. nbins : int, optional, default=100 Number of grid points used in each dimension. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. mask : boolean, optional, default=False Hide unsampled areas is True. Optional parameters for contourf (**kwargs) ------------------------------------------- corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. 
It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. """ x, y, z = get_grid_data( xall, yall, zall, nbins=nbins, method=method) if vmin is None: vmin = _np.min(zall[zall > -_np.inf]) if vmax is None: vmax = _np.max(zall[zall < _np.inf]) if levels == 'legacy': eps = (vmax - vmin) / float(ncontours) levels = _np.linspace(vmin - eps, vmax + eps) if mask: _, _, counts = get_histogram( xall, yall, nbins=nbins, weights=None, avoid_zero_count=None) z = _np.ma.masked_where(counts.T <= 0, z) return plot_map( x, y, z, ax=ax, cmap=cmap, ncontours=ncontours, vmin=vmin, vmax=vmax, levels=levels, cbar=cbar, cax=cax, cbar_label=cbar_label, cbar_orientation=cbar_orientation, norm=norm, **kwargs)
['def', 'plot_contour', '(', 'xall', ',', 'yall', ',', 'zall', ',', 'ax', '=', 'None', ',', 'cmap', '=', 'None', ',', 'ncontours', '=', '100', ',', 'vmin', '=', 'None', ',', 'vmax', '=', 'None', ',', 'levels', '=', 'None', ',', 'cbar', '=', 'True', ',', 'cax', '=', 'None', ',', 'cbar_label', '=', 'None', ',', 'cbar_orientation', '=', "'vertical'", ',', 'norm', '=', 'None', ',', 'nbins', '=', '100', ',', 'method', '=', "'nearest'", ',', 'mask', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'x', ',', 'y', ',', 'z', '=', 'get_grid_data', '(', 'xall', ',', 'yall', ',', 'zall', ',', 'nbins', '=', 'nbins', ',', 'method', '=', 'method', ')', 'if', 'vmin', 'is', 'None', ':', 'vmin', '=', '_np', '.', 'min', '(', 'zall', '[', 'zall', '>', '-', '_np', '.', 'inf', ']', ')', 'if', 'vmax', 'is', 'None', ':', 'vmax', '=', '_np', '.', 'max', '(', 'zall', '[', 'zall', '<', '_np', '.', 'inf', ']', ')', 'if', 'levels', '==', "'legacy'", ':', 'eps', '=', '(', 'vmax', '-', 'vmin', ')', '/', 'float', '(', 'ncontours', ')', 'levels', '=', '_np', '.', 'linspace', '(', 'vmin', '-', 'eps', ',', 'vmax', '+', 'eps', ')', 'if', 'mask', ':', '_', ',', '_', ',', 'counts', '=', 'get_histogram', '(', 'xall', ',', 'yall', ',', 'nbins', '=', 'nbins', ',', 'weights', '=', 'None', ',', 'avoid_zero_count', '=', 'None', ')', 'z', '=', '_np', '.', 'ma', '.', 'masked_where', '(', 'counts', '.', 'T', '<=', '0', ',', 'z', ')', 'return', 'plot_map', '(', 'x', ',', 'y', ',', 'z', ',', 'ax', '=', 'ax', ',', 'cmap', '=', 'cmap', ',', 'ncontours', '=', 'ncontours', ',', 'vmin', '=', 'vmin', ',', 'vmax', '=', 'vmax', ',', 'levels', '=', 'levels', ',', 'cbar', '=', 'cbar', ',', 'cax', '=', 'cax', ',', 'cbar_label', '=', 'cbar_label', ',', 'cbar_orientation', '=', 'cbar_orientation', ',', 'norm', '=', 'norm', ',', '*', '*', 'kwargs', ')']
Plot a two-dimensional contour map by interpolating scattered data on a grid. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot; use legacy style calculation if 'legacy'. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. nbins : int, optional, default=100 Number of grid points used in each dimension. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. mask : boolean, optional, default=False Hide unsampled areas is True. Optional parameters for contourf (**kwargs) ------------------------------------------- corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. 
Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'.
['Plot', 'a', 'two', '-', 'dimensional', 'contour', 'map', 'by', 'interpolating', 'scattered', 'data', 'on', 'a', 'grid', '.']
train
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L686-L815
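A hedged usage sketch on synthetic scattered data, assuming the function is importable as pyemma.plots.plot_contour (the import path is inferred from the file path above); the returned triple matches the documented (fig, ax, misc):

import numpy as np
import matplotlib.pyplot as plt
from pyemma.plots import plot_contour

rng = np.random.default_rng(0)
x = rng.normal(size=5000)
y = rng.normal(size=5000)
z = np.exp(-(x ** 2 + y ** 2))  # scattered values to interpolate onto a grid
fig, ax, misc = plot_contour(x, y, z, nbins=80, method='linear',
                             mask=True, cbar_label='z')
plt.show()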
81
tomturner/django-tenants
django_tenants/clone.py
CloneSchema.clone_schema
def clone_schema(self, base_schema_name, new_schema_name): """ Creates a new schema `new_schema_name` as a clone of an existing schema `old_schema_name`. """ connection.set_schema_to_public() cursor = connection.cursor() # check if the clone_schema function already exists in the db try: cursor.execute("SELECT 'clone_schema'::regproc") except ProgrammingError: self._create_clone_schema_function() transaction.commit() sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)' cursor.execute( sql, {'base_schema': base_schema_name, 'new_schema': new_schema_name} ) cursor.close()
python
def clone_schema(self, base_schema_name, new_schema_name): """ Creates a new schema `new_schema_name` as a clone of an existing schema `old_schema_name`. """ connection.set_schema_to_public() cursor = connection.cursor() # check if the clone_schema function already exists in the db try: cursor.execute("SELECT 'clone_schema'::regproc") except ProgrammingError: self._create_clone_schema_function() transaction.commit() sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)' cursor.execute( sql, {'base_schema': base_schema_name, 'new_schema': new_schema_name} ) cursor.close()
['def', 'clone_schema', '(', 'self', ',', 'base_schema_name', ',', 'new_schema_name', ')', ':', 'connection', '.', 'set_schema_to_public', '(', ')', 'cursor', '=', 'connection', '.', 'cursor', '(', ')', '# check if the clone_schema function already exists in the db', 'try', ':', 'cursor', '.', 'execute', '(', '"SELECT \'clone_schema\'::regproc"', ')', 'except', 'ProgrammingError', ':', 'self', '.', '_create_clone_schema_function', '(', ')', 'transaction', '.', 'commit', '(', ')', 'sql', '=', "'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)'", 'cursor', '.', 'execute', '(', 'sql', ',', '{', "'base_schema'", ':', 'base_schema_name', ',', "'new_schema'", ':', 'new_schema_name', '}', ')', 'cursor', '.', 'close', '(', ')']
Creates a new schema `new_schema_name` as a clone of an existing schema `base_schema_name`.
['Creates', 'a', 'new', 'schema', 'new_schema_name', 'as', 'a', 'clone', 'of', 'an', 'existing', 'schema', 'old_schema_name', '.']
train
https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/clone.py#L213-L233
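A hedged usage sketch inside a configured Django project using django-tenants; that CloneSchema takes no constructor arguments is an assumption, and the schema names are illustrative:

from django_tenants.clone import CloneSchema

# Copies tables, sequences, etc. from one PostgreSQL schema into a new one.
CloneSchema().clone_schema(
    base_schema_name='existing_tenant',
    new_schema_name='new_tenant',
)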
82
toumorokoshi/jenks
jenks/utils.py
get_configuration_file
def get_configuration_file(): """ return jenks configuration file """ path = os.path.abspath(os.curdir) while path != os.sep: config_path = os.path.join(path, CONFIG_FILE_NAME) if os.path.exists(config_path): return config_path path = os.path.dirname(path) return None
python
def get_configuration_file(): """ return jenks configuration file """ path = os.path.abspath(os.curdir) while path != os.sep: config_path = os.path.join(path, CONFIG_FILE_NAME) if os.path.exists(config_path): return config_path path = os.path.dirname(path) return None
['def', 'get_configuration_file', '(', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'curdir', ')', 'while', 'path', '!=', 'os', '.', 'sep', ':', 'config_path', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'CONFIG_FILE_NAME', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'config_path', ')', ':', 'return', 'config_path', 'path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'path', ')', 'return', 'None']
return jenks configuration file
['return', 'jenks', 'configuration', 'file']
train
https://github.com/toumorokoshi/jenks/blob/d3333a7b86ba290b7185aa5b8da75e76a28124f5/jenks/utils.py#L36-L44
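The upward directory walk used above is a common pattern; a generic, self-contained sketch follows (the file name passed in is arbitrary, and the root check is written portably rather than comparing against os.sep):

import os

def find_upwards(filename, start=None):
    path = os.path.abspath(start or os.curdir)
    while True:
        candidate = os.path.join(path, filename)
        if os.path.exists(candidate):
            return candidate
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root
            return None
        path = parent

print(find_upwards('pyproject.toml'))  # e.g. None when no such file exists upwards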
83
mitsei/dlkit
dlkit/json_/repository/sessions.py
AssetAdminSession.get_asset_form_for_create
def get_asset_form_for_create(self, asset_record_types): """Gets the asset form for creating new assets. A new form should be requested for each create transaction. arg: asset_record_types (osid.type.Type[]): array of asset record types return: (osid.repository.AssetForm) - the asset form raise: NullArgument - ``asset_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in asset_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if asset_record_types == []: obj_form = objects.AssetForm( repository_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.AssetForm( repository_id=self._catalog_id, record_types=asset_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
python
def get_asset_form_for_create(self, asset_record_types): """Gets the asset form for creating new assets. A new form should be requested for each create transaction. arg: asset_record_types (osid.type.Type[]): array of asset record types return: (osid.repository.AssetForm) - the asset form raise: NullArgument - ``asset_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in asset_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if asset_record_types == []: obj_form = objects.AssetForm( repository_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.AssetForm( repository_id=self._catalog_id, record_types=asset_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
['def', 'get_asset_form_for_create', '(', 'self', ',', 'asset_record_types', ')', ':', '# Implemented from template for', '# osid.resource.ResourceAdminSession.get_resource_form_for_create_template', 'for', 'arg', 'in', 'asset_record_types', ':', 'if', 'not', 'isinstance', '(', 'arg', ',', 'ABCType', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'one or more argument array elements is not a valid OSID Type'", ')', 'if', 'asset_record_types', '==', '[', ']', ':', 'obj_form', '=', 'objects', '.', 'AssetForm', '(', 'repository_id', '=', 'self', '.', '_catalog_id', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'effective_agent_id', '=', 'self', '.', 'get_effective_agent_id', '(', ')', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'else', ':', 'obj_form', '=', 'objects', '.', 'AssetForm', '(', 'repository_id', '=', 'self', '.', '_catalog_id', ',', 'record_types', '=', 'asset_record_types', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'effective_agent_id', '=', 'self', '.', 'get_effective_agent_id', '(', ')', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'self', '.', '_forms', '[', 'obj_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '=', 'not', 'CREATED', 'return', 'obj_form']
Gets the asset form for creating new assets. A new form should be requested for each create transaction. arg: asset_record_types (osid.type.Type[]): array of asset record types return: (osid.repository.AssetForm) - the asset form raise: NullArgument - ``asset_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.*
['Gets', 'the', 'asset', 'form', 'for', 'creating', 'new', 'assets', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L1279-L1314
84
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.verify
def verify(self, email): """ Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object. """ resp = self._call(endpoint='single', data={'email': email}) return VerifiedEmail(email, resp['result'])
python
def verify(self, email): """ Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object. """ resp = self._call(endpoint='single', data={'email': email}) return VerifiedEmail(email, resp['result'])
['def', 'verify', '(', 'self', ',', 'email', ')', ':', 'resp', '=', 'self', '.', '_call', '(', 'endpoint', '=', "'single'", ',', 'data', '=', '{', "'email'", ':', 'email', '}', ')', 'return', 'VerifiedEmail', '(', 'email', ',', 'resp', '[', "'result'", ']', ')']
Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object.
['Verify', 'a', 'single', 'email', 'address', '.', ':', 'param', 'str', 'email', ':', 'Email', 'address', 'to', 'verify', '.', ':', 'return', ':', 'A', 'VerifiedEmail', 'object', '.']
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L18-L25
85
JarryShaw/PyPCAPKit
src/protocols/internet/hip.py
HIP._read_para_hip_signature_2
def _read_para_hip_signature_2(self, code, cbit, clen, *, desc, length, version): """Read HIP HIP_SIGNATURE_2 parameter. Structure of HIP HIP_SIGNATURE_2 parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | SIG alg | Signature / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hip_signature_2.type Parameter Type 1 15 hip_signature_2.critical Critical Bit 2 16 hip_signature_2.length Length of Contents 4 32 hip_signature_2.algorithm SIG Algorithm 6 48 hip_signature_2.signature Signature ? ? - Padding """ _algo = self._read_unpack(2) _sign = self._read_fileng(clen-2) hip_signature_2 = dict( type=desc, critical=cbit, length=clen, algorithm=_HI_ALGORITHM.get(_algo, 'Unassigned'), signature=_sign, ) _plen = length - clen if _plen: self._read_fileng(_plen) return hip_signature_2
python
def _read_para_hip_signature_2(self, code, cbit, clen, *, desc, length, version): """Read HIP HIP_SIGNATURE_2 parameter. Structure of HIP HIP_SIGNATURE_2 parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | SIG alg | Signature / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hip_signature_2.type Parameter Type 1 15 hip_signature_2.critical Critical Bit 2 16 hip_signature_2.length Length of Contents 4 32 hip_signature_2.algorithm SIG Algorithm 6 48 hip_signature_2.signature Signature ? ? - Padding """ _algo = self._read_unpack(2) _sign = self._read_fileng(clen-2) hip_signature_2 = dict( type=desc, critical=cbit, length=clen, algorithm=_HI_ALGORITHM.get(_algo, 'Unassigned'), signature=_sign, ) _plen = length - clen if _plen: self._read_fileng(_plen) return hip_signature_2
['def', '_read_para_hip_signature_2', '(', 'self', ',', 'code', ',', 'cbit', ',', 'clen', ',', '*', ',', 'desc', ',', 'length', ',', 'version', ')', ':', '_algo', '=', 'self', '.', '_read_unpack', '(', '2', ')', '_sign', '=', 'self', '.', '_read_fileng', '(', 'clen', '-', '2', ')', 'hip_signature_2', '=', 'dict', '(', 'type', '=', 'desc', ',', 'critical', '=', 'cbit', ',', 'length', '=', 'clen', ',', 'algorithm', '=', '_HI_ALGORITHM', '.', 'get', '(', '_algo', ',', "'Unassigned'", ')', ',', 'signature', '=', '_sign', ',', ')', '_plen', '=', 'length', '-', 'clen', 'if', '_plen', ':', 'self', '.', '_read_fileng', '(', '_plen', ')', 'return', 'hip_signature_2']
Read HIP HIP_SIGNATURE_2 parameter. Structure of HIP HIP_SIGNATURE_2 parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | SIG alg | Signature / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hip_signature_2.type Parameter Type 1 15 hip_signature_2.critical Critical Bit 2 16 hip_signature_2.length Length of Contents 4 32 hip_signature_2.algorithm SIG Algorithm 6 48 hip_signature_2.signature Signature ? ? - Padding
['Read', 'HIP', 'HIP_SIGNATURE_2', 'parameter', '.']
train
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L2023-L2061
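A generic standalone sketch of the wire layout documented above: a 2-byte SIG algorithm field, (clen - 2) bytes of signature, and padding out to the full parameter length; the byte string fed in is made up for illustration:

import struct

def parse_signature_2(payload, clen, length):
    algo, = struct.unpack('!H', payload[:2])   # 2-byte algorithm identifier
    signature = payload[2:clen]                # clen counts the contents length
    padding = payload[clen:length]             # skipped, like _read_fileng(_plen)
    return {'algorithm': algo, 'signature': signature, 'padding_len': len(padding)}

blob = bytes.fromhex('0005') + b'\x01' * 10 + b'\x00' * 2  # algo 5, 10-byte sig, 2-byte pad
print(parse_signature_2(blob, clen=12, length=14))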
86
Preston-Landers/concurrent-log-handler
src/concurrent_log_handler/__init__.py
ConcurrentRotatingFileHandler.emit
def emit(self, record): """ Emit a record. Override from parent class to handle file locking for the duration of rollover and write. This also does the formatting *before* locks are obtained, in case the format itself does logging calls from within. Rollover also occurs while the lock is held. """ # noinspection PyBroadException try: msg = self.format(record) try: self._do_lock() try: if self.shouldRollover(record): self.doRollover() except Exception as e: self._console_log("Unable to do rollover: %s" % (e,), stack=True) # Continue on anyway self.do_write(msg) finally: self._do_unlock() except Exception: self.handleError(record)
python
def emit(self, record): """ Emit a record. Override from parent class to handle file locking for the duration of rollover and write. This also does the formatting *before* locks are obtained, in case the format itself does logging calls from within. Rollover also occurs while the lock is held. """ # noinspection PyBroadException try: msg = self.format(record) try: self._do_lock() try: if self.shouldRollover(record): self.doRollover() except Exception as e: self._console_log("Unable to do rollover: %s" % (e,), stack=True) # Continue on anyway self.do_write(msg) finally: self._do_unlock() except Exception: self.handleError(record)
['def', 'emit', '(', 'self', ',', 'record', ')', ':', '# noinspection PyBroadException', 'try', ':', 'msg', '=', 'self', '.', 'format', '(', 'record', ')', 'try', ':', 'self', '.', '_do_lock', '(', ')', 'try', ':', 'if', 'self', '.', 'shouldRollover', '(', 'record', ')', ':', 'self', '.', 'doRollover', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', '_console_log', '(', '"Unable to do rollover: %s"', '%', '(', 'e', ',', ')', ',', 'stack', '=', 'True', ')', '# Continue on anyway', 'self', '.', 'do_write', '(', 'msg', ')', 'finally', ':', 'self', '.', '_do_unlock', '(', ')', 'except', 'Exception', ':', 'self', '.', 'handleError', '(', 'record', ')']
Emit a record. Override from parent class to handle file locking for the duration of rollover and write. This also does the formatting *before* locks are obtained, in case the format itself does logging calls from within. Rollover also occurs while the lock is held.
['Emit', 'a', 'record', '.']
train
https://github.com/Preston-Landers/concurrent-log-handler/blob/8e0b8e28c2b12e854853d723b3c28346a3218914/src/concurrent_log_handler/__init__.py#L298-L324
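A hedged usage sketch of wiring up the handler whose emit() is shown above; the constructor keywords are assumed to mirror logging.handlers.RotatingFileHandler:

import logging
from concurrent_log_handler import ConcurrentRotatingFileHandler

logger = logging.getLogger('example')
handler = ConcurrentRotatingFileHandler('app.log', maxBytes=1_000_000, backupCount=5)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('message written under the handler file lock')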
87
Autodesk/aomi
aomi/vault.py
Client.server_version
def server_version(self): """Attempts to determine the version of Vault that a server is running. Some actions will change on older Vault deployments.""" health_url = "%s/v1/sys/health" % self.vault_addr resp = self.session.request('get', health_url, **self._kwargs) if resp.status_code == 200 or resp.status_code == 429: blob = resp.json() if 'version' in blob: return blob['version'] else: raise aomi.exceptions.VaultProblem('Health check failed') return None
python
def server_version(self): """Attempts to determine the version of Vault that a server is running. Some actions will change on older Vault deployments.""" health_url = "%s/v1/sys/health" % self.vault_addr resp = self.session.request('get', health_url, **self._kwargs) if resp.status_code == 200 or resp.status_code == 429: blob = resp.json() if 'version' in blob: return blob['version'] else: raise aomi.exceptions.VaultProblem('Health check failed') return None
['def', 'server_version', '(', 'self', ')', ':', 'health_url', '=', '"%s/v1/sys/health"', '%', 'self', '.', 'vault_addr', 'resp', '=', 'self', '.', 'session', '.', 'request', '(', "'get'", ',', 'health_url', ',', '*', '*', 'self', '.', '_kwargs', ')', 'if', 'resp', '.', 'status_code', '==', '200', 'or', 'resp', '.', 'status_code', '==', '429', ':', 'blob', '=', 'resp', '.', 'json', '(', ')', 'if', "'version'", 'in', 'blob', ':', 'return', 'blob', '[', "'version'", ']', 'else', ':', 'raise', 'aomi', '.', 'exceptions', '.', 'VaultProblem', '(', "'Health check failed'", ')', 'return', 'None']
Attempts to determine the version of Vault that a server is running. Some actions will change on older Vault deployments.
['Attempts', 'to', 'determine', 'the', 'version', 'of', 'Vault', 'that', 'a', 'server', 'is', 'running', '.', 'Some', 'actions', 'will', 'change', 'on', 'older', 'Vault', 'deployments', '.']
train
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/vault.py#L191-L204
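A raw-requests sketch of the same health probe; the Vault address is a placeholder, and the 200/429 handling and the 'version' key follow the code above:

import requests

resp = requests.get('http://127.0.0.1:8200/v1/sys/health')
if resp.status_code in (200, 429):
    print(resp.json().get('version'))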
88
ryanmcgrath/twython
twython/streaming/types.py
TwythonStreamerTypes.site
def site(self, **params): """Stream site Accepted params found at: https://dev.twitter.com/docs/api/1.1/get/site """ url = 'https://sitestream.twitter.com/%s/site.json' \ % self.streamer.api_version self.streamer._request(url, params=params)
python
def site(self, **params): """Stream site Accepted params found at: https://dev.twitter.com/docs/api/1.1/get/site """ url = 'https://sitestream.twitter.com/%s/site.json' \ % self.streamer.api_version self.streamer._request(url, params=params)
['def', 'site', '(', 'self', ',', '*', '*', 'params', ')', ':', 'url', '=', "'https://sitestream.twitter.com/%s/site.json'", '%', 'self', '.', 'streamer', '.', 'api_version', 'self', '.', 'streamer', '.', '_request', '(', 'url', ',', 'params', '=', 'params', ')']
Stream site Accepted params found at: https://dev.twitter.com/docs/api/1.1/get/site
['Stream', 'site']
train
https://github.com/ryanmcgrath/twython/blob/7366de80efcbbdfaf615d3f1fea72546196916fc/twython/streaming/types.py#L33-L41
89
rsinger86/drf-flex-fields
rest_flex_fields/serializers.py
FlexFieldsSerializerMixin._get_expand_input
def _get_expand_input(self, passed_settings): """ If expand value is explicitliy passed, just return it. If parsing from request, ensure that the value complies with the "permitted_expands" list passed into the context from the FlexFieldsMixin. """ value = passed_settings.get("expand") if len(value) > 0: return value if not self._can_access_request: return [] expand = self._parse_request_list_value("expand") if "permitted_expands" in self.context: permitted_expands = self.context["permitted_expands"] if "~all" in expand or "*" in expand: return permitted_expands else: return list(set(expand) & set(permitted_expands)) return expand
python
def _get_expand_input(self, passed_settings): """ If expand value is explicitliy passed, just return it. If parsing from request, ensure that the value complies with the "permitted_expands" list passed into the context from the FlexFieldsMixin. """ value = passed_settings.get("expand") if len(value) > 0: return value if not self._can_access_request: return [] expand = self._parse_request_list_value("expand") if "permitted_expands" in self.context: permitted_expands = self.context["permitted_expands"] if "~all" in expand or "*" in expand: return permitted_expands else: return list(set(expand) & set(permitted_expands)) return expand
['def', '_get_expand_input', '(', 'self', ',', 'passed_settings', ')', ':', 'value', '=', 'passed_settings', '.', 'get', '(', '"expand"', ')', 'if', 'len', '(', 'value', ')', '>', '0', ':', 'return', 'value', 'if', 'not', 'self', '.', '_can_access_request', ':', 'return', '[', ']', 'expand', '=', 'self', '.', '_parse_request_list_value', '(', '"expand"', ')', 'if', '"permitted_expands"', 'in', 'self', '.', 'context', ':', 'permitted_expands', '=', 'self', '.', 'context', '[', '"permitted_expands"', ']', 'if', '"~all"', 'in', 'expand', 'or', '"*"', 'in', 'expand', ':', 'return', 'permitted_expands', 'else', ':', 'return', 'list', '(', 'set', '(', 'expand', ')', '&', 'set', '(', 'permitted_expands', ')', ')', 'return', 'expand']
If expand value is explicitly passed, just return it. If parsing from request, ensure that the value complies with the "permitted_expands" list passed into the context from the FlexFieldsMixin.
['If', 'expand', 'value', 'is', 'explicitliy', 'passed', 'just', 'return', 'it', '.', 'If', 'parsing', 'from', 'request', 'ensure', 'that', 'the', 'value', 'complies', 'with', 'the', 'permitted_expands', 'list', 'passed', 'into', 'the', 'context', 'from', 'the', 'FlexFieldsMixin', '.']
train
https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L196-L221
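The permitted-expands filtering rule above reduces to a wildcard check plus a set intersection; a minimal sketch with made-up field names:

def filter_expand(requested, permitted):
    if '~all' in requested or '*' in requested:
        return list(permitted)
    return list(set(requested) & set(permitted))

print(filter_expand(['author', 'comments', 'secret'], ['author', 'comments']))
# -> ['author', 'comments'] in some order (set intersection is unordered)
print(filter_expand(['*'], ['author', 'comments']))  # -> ['author', 'comments']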
90
eyurtsev/FlowCytometryTools
FlowCytometryTools/gui/fc_widget.py
BaseVertex.update_coordinates
def update_coordinates(self, new_coordinates): """ new_coordinates : dict """ for k, v in new_coordinates.items(): if k in self.coordinates: self.coordinates[k] = v for svertex in self.spawn_list: verts = tuple([self.coordinates.get(ch, None) for ch in svertex.channels]) if len(svertex.channels) == 1: # This means a histogram svertex.update_position(verts[0], None) else: svertex.update_position(verts[0], verts[1]) self.callback(Event(Event.BASE_GATE_CHANGED))
python
def update_coordinates(self, new_coordinates): """ new_coordinates : dict """ for k, v in new_coordinates.items(): if k in self.coordinates: self.coordinates[k] = v for svertex in self.spawn_list: verts = tuple([self.coordinates.get(ch, None) for ch in svertex.channels]) if len(svertex.channels) == 1: # This means a histogram svertex.update_position(verts[0], None) else: svertex.update_position(verts[0], verts[1]) self.callback(Event(Event.BASE_GATE_CHANGED))
['def', 'update_coordinates', '(', 'self', ',', 'new_coordinates', ')', ':', 'for', 'k', ',', 'v', 'in', 'new_coordinates', '.', 'items', '(', ')', ':', 'if', 'k', 'in', 'self', '.', 'coordinates', ':', 'self', '.', 'coordinates', '[', 'k', ']', '=', 'v', 'for', 'svertex', 'in', 'self', '.', 'spawn_list', ':', 'verts', '=', 'tuple', '(', '[', 'self', '.', 'coordinates', '.', 'get', '(', 'ch', ',', 'None', ')', 'for', 'ch', 'in', 'svertex', '.', 'channels', ']', ')', 'if', 'len', '(', 'svertex', '.', 'channels', ')', '==', '1', ':', '# This means a histogram', 'svertex', '.', 'update_position', '(', 'verts', '[', '0', ']', ',', 'None', ')', 'else', ':', 'svertex', '.', 'update_position', '(', 'verts', '[', '0', ']', ',', 'verts', '[', '1', ']', ')', 'self', '.', 'callback', '(', 'Event', '(', 'Event', '.', 'BASE_GATE_CHANGED', ')', ')']
new_coordinates : dict
['new_coordinates', ':', 'dict']
train
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L150-L165
91
abelcarreras/DynaPhoPy
dynaphopy/analysis/fitting/fitting_functions.py
Gaussian_function._function
def _function(self, x, a, b, c, d): """Gaussian PDF function x: coordinate a: peak position b: deviation (sigma) c: area proportional parameter d: base line """ return c/b*np.sqrt(2*np.pi)*np.exp(-(x-a)**2/(2*b**2))+d
python
def _function(self, x, a, b, c, d): """Gaussian PDF function x: coordinate a: peak position b: deviation (sigma) c: area proportional parameter d: base line """ return c/b*np.sqrt(2*np.pi)*np.exp(-(x-a)**2/(2*b**2))+d
['def', '_function', '(', 'self', ',', 'x', ',', 'a', ',', 'b', ',', 'c', ',', 'd', ')', ':', 'return', 'c', '/', 'b', '*', 'np', '.', 'sqrt', '(', '2', '*', 'np', '.', 'pi', ')', '*', 'np', '.', 'exp', '(', '-', '(', 'x', '-', 'a', ')', '**', '2', '/', '(', '2', '*', 'b', '**', '2', ')', ')', '+', 'd']
Gaussian PDF function x: coordinate a: peak position b: deviation (sigma) c: area proportional parameter d: base line
['Gaussian', 'PDF', 'function', 'x', ':', 'coordinate', 'a', ':', 'peak', 'position', 'b', ':', 'deviation', '(', 'sigma', ')', 'c', ':', 'area', 'proportional', 'parameter', 'd', ':', 'base', 'line']
train
https://github.com/abelcarreras/DynaPhoPy/blob/51e99422228e6be84830d659b88a0ca904d9136f/dynaphopy/analysis/fitting/fitting_functions.py#L302-L310
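A short sketch evaluating the fitted-Gaussian expression from the record above with numpy; parameter names follow the docstring (a = peak position, b = sigma, c = area-like scale, d = baseline) and the sample values are made up.

import numpy as np

def gaussian(x, a, b, c, d):
    # same expression as in the record: scaled Gaussian plus a constant baseline
    return c / b * np.sqrt(2 * np.pi) * np.exp(-(x - a) ** 2 / (2 * b ** 2)) + d

x = np.linspace(-5.0, 5.0, 11)
print(gaussian(x, a=0.0, b=1.0, c=1.0, d=0.1))  # largest value at x = 0, tails approach the baseline 0.1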
92
woolfson-group/isambard
isambard/optimisation/evo_optimizers.py
CMAES._initial_individual
def _initial_individual(self): """Generates an individual with random parameters within bounds.""" ind = creator.Individual( [random.uniform(-1, 1) for _ in range(len(self.value_means))]) return ind
python
def _initial_individual(self): """Generates an individual with random parameters within bounds.""" ind = creator.Individual( [random.uniform(-1, 1) for _ in range(len(self.value_means))]) return ind
['def', '_initial_individual', '(', 'self', ')', ':', 'ind', '=', 'creator', '.', 'Individual', '(', '[', 'random', '.', 'uniform', '(', '-', '1', ',', '1', ')', 'for', '_', 'in', 'range', '(', 'len', '(', 'self', '.', 'value_means', ')', ')', ']', ')', 'return', 'ind']
Generates an individual with random parameters within bounds.
['Generates', 'an', 'individual', 'with', 'random', 'parameters', 'within', 'bounds', '.']
train
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/evo_optimizers.py#L472-L477
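The initialisation in the record above reduces to drawing one uniform sample in [-1, 1] per optimised value; a dependency-free sketch using plain lists instead of DEAP's creator.Individual (the length of value_means here is arbitrary).

import random

value_means = [0.0, 0.0, 0.0, 0.0]                        # stand-in for the optimizer's value_means
individual = [random.uniform(-1, 1) for _ in range(len(value_means))]
print(individual)                                         # four numbers, each in [-1, 1]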
93
yfpeng/bioc
bioc/biocxml/encoder.py
BioCXMLDocumentWriter.write_collection_info
def write_collection_info(self, collection: BioCCollection): """ Writes the collection information: encoding, version, DTD, source, date, key, infons, etc. """ elem = etree.Element('source') elem.text = collection.source self.__writer.send(elem) elem = etree.Element('date') elem.text = collection.date self.__writer.send(elem) elem = etree.Element('key') elem.text = collection.key self.__writer.send(elem) for k, v in collection.infons.items(): elem = etree.Element('infon', {'key': str(k)}) elem.text = str(v) self.__writer.send(elem)
python
def write_collection_info(self, collection: BioCCollection): """ Writes the collection information: encoding, version, DTD, source, date, key, infons, etc. """ elem = etree.Element('source') elem.text = collection.source self.__writer.send(elem) elem = etree.Element('date') elem.text = collection.date self.__writer.send(elem) elem = etree.Element('key') elem.text = collection.key self.__writer.send(elem) for k, v in collection.infons.items(): elem = etree.Element('infon', {'key': str(k)}) elem.text = str(v) self.__writer.send(elem)
['def', 'write_collection_info', '(', 'self', ',', 'collection', ':', 'BioCCollection', ')', ':', 'elem', '=', 'etree', '.', 'Element', '(', "'source'", ')', 'elem', '.', 'text', '=', 'collection', '.', 'source', 'self', '.', '__writer', '.', 'send', '(', 'elem', ')', 'elem', '=', 'etree', '.', 'Element', '(', "'date'", ')', 'elem', '.', 'text', '=', 'collection', '.', 'date', 'self', '.', '__writer', '.', 'send', '(', 'elem', ')', 'elem', '=', 'etree', '.', 'Element', '(', "'key'", ')', 'elem', '.', 'text', '=', 'collection', '.', 'key', 'self', '.', '__writer', '.', 'send', '(', 'elem', ')', 'for', 'k', ',', 'v', 'in', 'collection', '.', 'infons', '.', 'items', '(', ')', ':', 'elem', '=', 'etree', '.', 'Element', '(', "'infon'", ',', '{', "'key'", ':', 'str', '(', 'k', ')', '}', ')', 'elem', '.', 'text', '=', 'str', '(', 'v', ')', 'self', '.', '__writer', '.', 'send', '(', 'elem', ')']
Writes the collection information: encoding, version, DTD, source, date, key, infons, etc.
['Writes', 'the', 'collection', 'information', ':', 'encoding', 'version', 'DTD', 'source', 'date', 'key', 'infons', 'etc', '.']
train
https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocxml/encoder.py#L185-L204
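A sketch of writing the same child elements (source, date, key, infon) with the standard-library ElementTree instead of the coroutine-based writer in the record above; the collection values are invented.

from xml.etree import ElementTree as etree

collection = {"source": "PubMed", "date": "20240101", "key": "example.key",
              "infons": {"license": "CC0"}}
root = etree.Element("collection")
for tag in ("source", "date", "key"):
    child = etree.SubElement(root, tag)
    child.text = collection[tag]
for k, v in collection["infons"].items():
    infon = etree.SubElement(root, "infon", {"key": str(k)})
    infon.text = str(v)
print(etree.tostring(root, encoding="unicode"))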
94
pymc-devs/pymc
pymc/Matplot.py
autocorrelation
def autocorrelation( data, name, maxlags=100, format='png', reflected=False, suffix='-acf', path='./', fontmap=None, new=True, last=True, rows=1, columns=1, num=1, verbose=1): """ Generate bar plot of the autocorrelation function for a series (usually an MCMC trace). :Arguments: data: PyMC object, trace or array A trace from an MCMC sample or a PyMC object with one or more traces. name: string The name of the object. maxlags (optional): int The largest discrete value for the autocorrelation to be calculated (defaults to 100). format (optional): string Graphic output format (defaults to png). suffix (optional): string Filename suffix. path (optional): string Specifies location for saving plots (defaults to local directory). fontmap (optional): dict Font mapping for plot labels; most users should not specify this. verbose (optional): int Level of output verbosity. """ # Internal plotting specification for handling nested arrays if fontmap is None: fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4} # Stand-alone plot or subplot? standalone = rows == 1 and columns == 1 and num == 1 if standalone: if verbose > 0: print_('Plotting', name) figure() subplot(rows, columns, num) if ndim(data) == 1: maxlags = min(len(data) - 1, maxlags) try: acorr(data, detrend=mlab.detrend_mean, maxlags=maxlags) except: print_('Cannot plot autocorrelation for %s' % name) return # Set axis bounds ylim(-.1, 1.1) xlim(-maxlags * reflected or 0, maxlags) # Plot options title( '\n\n %s acorr' % name, x=0., y=1., ha='left', va='top', fontsize='small') # Smaller tick labels tlabels = gca().get_xticklabels() setp(tlabels, 'fontsize', fontmap[1]) tlabels = gca().get_yticklabels() setp(tlabels, 'fontsize', fontmap[1]) elif ndim(data) == 2: # generate acorr plot for each dimension rows = data.shape[1] for j in range(rows): autocorrelation( data[:, j], '%s_%d' % (name, j), maxlags, fontmap=fontmap, rows=rows, columns=1, num=j + 1) else: raise ValueError( 'Only 1- and 2- dimensional functions can be displayed') if standalone: if not os.path.exists(path): os.mkdir(path) if not path.endswith('/'): path += '/' # Save to fiel savefig("%s%s%s.%s" % (path, name, suffix, format))
python
def autocorrelation( data, name, maxlags=100, format='png', reflected=False, suffix='-acf', path='./', fontmap=None, new=True, last=True, rows=1, columns=1, num=1, verbose=1): """ Generate bar plot of the autocorrelation function for a series (usually an MCMC trace). :Arguments: data: PyMC object, trace or array A trace from an MCMC sample or a PyMC object with one or more traces. name: string The name of the object. maxlags (optional): int The largest discrete value for the autocorrelation to be calculated (defaults to 100). format (optional): string Graphic output format (defaults to png). suffix (optional): string Filename suffix. path (optional): string Specifies location for saving plots (defaults to local directory). fontmap (optional): dict Font mapping for plot labels; most users should not specify this. verbose (optional): int Level of output verbosity. """ # Internal plotting specification for handling nested arrays if fontmap is None: fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4} # Stand-alone plot or subplot? standalone = rows == 1 and columns == 1 and num == 1 if standalone: if verbose > 0: print_('Plotting', name) figure() subplot(rows, columns, num) if ndim(data) == 1: maxlags = min(len(data) - 1, maxlags) try: acorr(data, detrend=mlab.detrend_mean, maxlags=maxlags) except: print_('Cannot plot autocorrelation for %s' % name) return # Set axis bounds ylim(-.1, 1.1) xlim(-maxlags * reflected or 0, maxlags) # Plot options title( '\n\n %s acorr' % name, x=0., y=1., ha='left', va='top', fontsize='small') # Smaller tick labels tlabels = gca().get_xticklabels() setp(tlabels, 'fontsize', fontmap[1]) tlabels = gca().get_yticklabels() setp(tlabels, 'fontsize', fontmap[1]) elif ndim(data) == 2: # generate acorr plot for each dimension rows = data.shape[1] for j in range(rows): autocorrelation( data[:, j], '%s_%d' % (name, j), maxlags, fontmap=fontmap, rows=rows, columns=1, num=j + 1) else: raise ValueError( 'Only 1- and 2- dimensional functions can be displayed') if standalone: if not os.path.exists(path): os.mkdir(path) if not path.endswith('/'): path += '/' # Save to fiel savefig("%s%s%s.%s" % (path, name, suffix, format))
['def', 'autocorrelation', '(', 'data', ',', 'name', ',', 'maxlags', '=', '100', ',', 'format', '=', "'png'", ',', 'reflected', '=', 'False', ',', 'suffix', '=', "'-acf'", ',', 'path', '=', "'./'", ',', 'fontmap', '=', 'None', ',', 'new', '=', 'True', ',', 'last', '=', 'True', ',', 'rows', '=', '1', ',', 'columns', '=', '1', ',', 'num', '=', '1', ',', 'verbose', '=', '1', ')', ':', '# Internal plotting specification for handling nested arrays', 'if', 'fontmap', 'is', 'None', ':', 'fontmap', '=', '{', '1', ':', '10', ',', '2', ':', '8', ',', '3', ':', '6', ',', '4', ':', '5', ',', '5', ':', '4', '}', '# Stand-alone plot or subplot?', 'standalone', '=', 'rows', '==', '1', 'and', 'columns', '==', '1', 'and', 'num', '==', '1', 'if', 'standalone', ':', 'if', 'verbose', '>', '0', ':', 'print_', '(', "'Plotting'", ',', 'name', ')', 'figure', '(', ')', 'subplot', '(', 'rows', ',', 'columns', ',', 'num', ')', 'if', 'ndim', '(', 'data', ')', '==', '1', ':', 'maxlags', '=', 'min', '(', 'len', '(', 'data', ')', '-', '1', ',', 'maxlags', ')', 'try', ':', 'acorr', '(', 'data', ',', 'detrend', '=', 'mlab', '.', 'detrend_mean', ',', 'maxlags', '=', 'maxlags', ')', 'except', ':', 'print_', '(', "'Cannot plot autocorrelation for %s'", '%', 'name', ')', 'return', '# Set axis bounds', 'ylim', '(', '-', '.1', ',', '1.1', ')', 'xlim', '(', '-', 'maxlags', '*', 'reflected', 'or', '0', ',', 'maxlags', ')', '# Plot options', 'title', '(', "'\\n\\n %s acorr'", '%', 'name', ',', 'x', '=', '0.', ',', 'y', '=', '1.', ',', 'ha', '=', "'left'", ',', 'va', '=', "'top'", ',', 'fontsize', '=', "'small'", ')', '# Smaller tick labels', 'tlabels', '=', 'gca', '(', ')', '.', 'get_xticklabels', '(', ')', 'setp', '(', 'tlabels', ',', "'fontsize'", ',', 'fontmap', '[', '1', ']', ')', 'tlabels', '=', 'gca', '(', ')', '.', 'get_yticklabels', '(', ')', 'setp', '(', 'tlabels', ',', "'fontsize'", ',', 'fontmap', '[', '1', ']', ')', 'elif', 'ndim', '(', 'data', ')', '==', '2', ':', '# generate acorr plot for each dimension', 'rows', '=', 'data', '.', 'shape', '[', '1', ']', 'for', 'j', 'in', 'range', '(', 'rows', ')', ':', 'autocorrelation', '(', 'data', '[', ':', ',', 'j', ']', ',', "'%s_%d'", '%', '(', 'name', ',', 'j', ')', ',', 'maxlags', ',', 'fontmap', '=', 'fontmap', ',', 'rows', '=', 'rows', ',', 'columns', '=', '1', ',', 'num', '=', 'j', '+', '1', ')', 'else', ':', 'raise', 'ValueError', '(', "'Only 1- and 2- dimensional functions can be displayed'", ')', 'if', 'standalone', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'os', '.', 'mkdir', '(', 'path', ')', 'if', 'not', 'path', '.', 'endswith', '(', "'/'", ')', ':', 'path', '+=', "'/'", '# Save to fiel', 'savefig', '(', '"%s%s%s.%s"', '%', '(', 'path', ',', 'name', ',', 'suffix', ',', 'format', ')', ')']
Generate bar plot of the autocorrelation function for a series (usually an MCMC trace). :Arguments: data: PyMC object, trace or array A trace from an MCMC sample or a PyMC object with one or more traces. name: string The name of the object. maxlags (optional): int The largest discrete value for the autocorrelation to be calculated (defaults to 100). format (optional): string Graphic output format (defaults to png). suffix (optional): string Filename suffix. path (optional): string Specifies location for saving plots (defaults to local directory). fontmap (optional): dict Font mapping for plot labels; most users should not specify this. verbose (optional): int Level of output verbosity.
['Generate', 'bar', 'plot', 'of', 'the', 'autocorrelation', 'function', 'for', 'a', 'series', '(', 'usually', 'an', 'MCMC', 'trace', ')', '.']
train
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L899-L997
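The bars plotted by the record above are normalized autocorrelation values; a plotting-free sketch of roughly the same quantity using numpy only (mean-detrend, correlate, normalize at lag 0 - matplotlib's acorr with detrend_mean does essentially this before drawing). The random series stands in for an MCMC trace.

import numpy as np

data = np.random.default_rng(0).normal(size=500)    # stand-in for an MCMC trace
x = data - data.mean()                              # detrend by removing the mean
full = np.correlate(x, x, mode="full")
acf = full[full.size // 2:] / full[full.size // 2]  # keep lags >= 0, normalize lag 0 to 1.0
print(acf[:5])                                      # first value is 1.0, the rest decay toward 0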
95
pywbem/pywbem
pywbem/cim_obj.py
CIMProperty.tocimxml
def tocimxml(self): """ Return the CIM-XML representation of this CIM property, as an object of an appropriate subclass of :term:`Element`. The returned CIM-XML representation is a `PROPERTY`, `PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the property type, and consistent with :term:`DSP0201`. Note that array properties cannot be of reference type. The order of qualifiers in the returned CIM-XML representation is preserved from the :class:`~pywbem.CIMProperty` object. Returns: The CIM-XML representation, as an object of an appropriate subclass of :term:`Element`. """ qualifiers = [q.tocimxml() for q in self.qualifiers.values()] if self.is_array: # pylint: disable=no-else-return assert self.type != 'reference' if self.value is None: value_xml = None else: array_xml = [] for v in self.value: if v is None: if SEND_VALUE_NULL: array_xml.append(cim_xml.VALUE_NULL()) else: array_xml.append(cim_xml.VALUE(None)) elif self.embedded_object is not None: assert isinstance(v, (CIMInstance, CIMClass)) array_xml.append(cim_xml.VALUE(v.tocimxml().toxml())) else: array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v))) value_xml = cim_xml.VALUE_ARRAY(array_xml) return cim_xml.PROPERTY_ARRAY( self.name, self.type, value_xml, self.array_size, self.class_origin, self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers) elif self.type == 'reference': # scalar if self.value is None: value_xml = None else: value_xml = cim_xml.VALUE_REFERENCE(self.value.tocimxml()) return cim_xml.PROPERTY_REFERENCE( self.name, value_xml, reference_class=self.reference_class, class_origin=self.class_origin, propagated=self.propagated, qualifiers=qualifiers) else: # scalar non-reference if self.value is None: value_xml = None else: if self.embedded_object is not None: assert isinstance(self.value, (CIMInstance, CIMClass)) value_xml = cim_xml.VALUE(self.value.tocimxml().toxml()) else: value_xml = cim_xml.VALUE(atomic_to_cim_xml(self.value)) return cim_xml.PROPERTY( self.name, self.type, value_xml, class_origin=self.class_origin, propagated=self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers)
python
def tocimxml(self): """ Return the CIM-XML representation of this CIM property, as an object of an appropriate subclass of :term:`Element`. The returned CIM-XML representation is a `PROPERTY`, `PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the property type, and consistent with :term:`DSP0201`. Note that array properties cannot be of reference type. The order of qualifiers in the returned CIM-XML representation is preserved from the :class:`~pywbem.CIMProperty` object. Returns: The CIM-XML representation, as an object of an appropriate subclass of :term:`Element`. """ qualifiers = [q.tocimxml() for q in self.qualifiers.values()] if self.is_array: # pylint: disable=no-else-return assert self.type != 'reference' if self.value is None: value_xml = None else: array_xml = [] for v in self.value: if v is None: if SEND_VALUE_NULL: array_xml.append(cim_xml.VALUE_NULL()) else: array_xml.append(cim_xml.VALUE(None)) elif self.embedded_object is not None: assert isinstance(v, (CIMInstance, CIMClass)) array_xml.append(cim_xml.VALUE(v.tocimxml().toxml())) else: array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v))) value_xml = cim_xml.VALUE_ARRAY(array_xml) return cim_xml.PROPERTY_ARRAY( self.name, self.type, value_xml, self.array_size, self.class_origin, self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers) elif self.type == 'reference': # scalar if self.value is None: value_xml = None else: value_xml = cim_xml.VALUE_REFERENCE(self.value.tocimxml()) return cim_xml.PROPERTY_REFERENCE( self.name, value_xml, reference_class=self.reference_class, class_origin=self.class_origin, propagated=self.propagated, qualifiers=qualifiers) else: # scalar non-reference if self.value is None: value_xml = None else: if self.embedded_object is not None: assert isinstance(self.value, (CIMInstance, CIMClass)) value_xml = cim_xml.VALUE(self.value.tocimxml().toxml()) else: value_xml = cim_xml.VALUE(atomic_to_cim_xml(self.value)) return cim_xml.PROPERTY( self.name, self.type, value_xml, class_origin=self.class_origin, propagated=self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers)
['def', 'tocimxml', '(', 'self', ')', ':', 'qualifiers', '=', '[', 'q', '.', 'tocimxml', '(', ')', 'for', 'q', 'in', 'self', '.', 'qualifiers', '.', 'values', '(', ')', ']', 'if', 'self', '.', 'is_array', ':', '# pylint: disable=no-else-return', 'assert', 'self', '.', 'type', '!=', "'reference'", 'if', 'self', '.', 'value', 'is', 'None', ':', 'value_xml', '=', 'None', 'else', ':', 'array_xml', '=', '[', ']', 'for', 'v', 'in', 'self', '.', 'value', ':', 'if', 'v', 'is', 'None', ':', 'if', 'SEND_VALUE_NULL', ':', 'array_xml', '.', 'append', '(', 'cim_xml', '.', 'VALUE_NULL', '(', ')', ')', 'else', ':', 'array_xml', '.', 'append', '(', 'cim_xml', '.', 'VALUE', '(', 'None', ')', ')', 'elif', 'self', '.', 'embedded_object', 'is', 'not', 'None', ':', 'assert', 'isinstance', '(', 'v', ',', '(', 'CIMInstance', ',', 'CIMClass', ')', ')', 'array_xml', '.', 'append', '(', 'cim_xml', '.', 'VALUE', '(', 'v', '.', 'tocimxml', '(', ')', '.', 'toxml', '(', ')', ')', ')', 'else', ':', 'array_xml', '.', 'append', '(', 'cim_xml', '.', 'VALUE', '(', 'atomic_to_cim_xml', '(', 'v', ')', ')', ')', 'value_xml', '=', 'cim_xml', '.', 'VALUE_ARRAY', '(', 'array_xml', ')', 'return', 'cim_xml', '.', 'PROPERTY_ARRAY', '(', 'self', '.', 'name', ',', 'self', '.', 'type', ',', 'value_xml', ',', 'self', '.', 'array_size', ',', 'self', '.', 'class_origin', ',', 'self', '.', 'propagated', ',', 'embedded_object', '=', 'self', '.', 'embedded_object', ',', 'qualifiers', '=', 'qualifiers', ')', 'elif', 'self', '.', 'type', '==', "'reference'", ':', '# scalar', 'if', 'self', '.', 'value', 'is', 'None', ':', 'value_xml', '=', 'None', 'else', ':', 'value_xml', '=', 'cim_xml', '.', 'VALUE_REFERENCE', '(', 'self', '.', 'value', '.', 'tocimxml', '(', ')', ')', 'return', 'cim_xml', '.', 'PROPERTY_REFERENCE', '(', 'self', '.', 'name', ',', 'value_xml', ',', 'reference_class', '=', 'self', '.', 'reference_class', ',', 'class_origin', '=', 'self', '.', 'class_origin', ',', 'propagated', '=', 'self', '.', 'propagated', ',', 'qualifiers', '=', 'qualifiers', ')', 'else', ':', '# scalar non-reference', 'if', 'self', '.', 'value', 'is', 'None', ':', 'value_xml', '=', 'None', 'else', ':', 'if', 'self', '.', 'embedded_object', 'is', 'not', 'None', ':', 'assert', 'isinstance', '(', 'self', '.', 'value', ',', '(', 'CIMInstance', ',', 'CIMClass', ')', ')', 'value_xml', '=', 'cim_xml', '.', 'VALUE', '(', 'self', '.', 'value', '.', 'tocimxml', '(', ')', '.', 'toxml', '(', ')', ')', 'else', ':', 'value_xml', '=', 'cim_xml', '.', 'VALUE', '(', 'atomic_to_cim_xml', '(', 'self', '.', 'value', ')', ')', 'return', 'cim_xml', '.', 'PROPERTY', '(', 'self', '.', 'name', ',', 'self', '.', 'type', ',', 'value_xml', ',', 'class_origin', '=', 'self', '.', 'class_origin', ',', 'propagated', '=', 'self', '.', 'propagated', ',', 'embedded_object', '=', 'self', '.', 'embedded_object', ',', 'qualifiers', '=', 'qualifiers', ')']
Return the CIM-XML representation of this CIM property, as an object of an appropriate subclass of :term:`Element`. The returned CIM-XML representation is a `PROPERTY`, `PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the property type, and consistent with :term:`DSP0201`. Note that array properties cannot be of reference type. The order of qualifiers in the returned CIM-XML representation is preserved from the :class:`~pywbem.CIMProperty` object. Returns: The CIM-XML representation, as an object of an appropriate subclass of :term:`Element`.
['Return', 'the', 'CIM', '-', 'XML', 'representation', 'of', 'this', 'CIM', 'property', 'as', 'an', 'object', 'of', 'an', 'appropriate', 'subclass', 'of', ':', 'term', ':', 'Element', '.']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L4883-L4967
96
rocky/python-spark
spark_parser/spark.py
GenericParser.get_profile_info
def get_profile_info(self): """Show the accumulated results of how many times each rule was used""" return sorted(self.profile_info.items(), key=lambda kv: kv[1], reverse=False)
python
def get_profile_info(self): """Show the accumulated results of how many times each rule was used""" return sorted(self.profile_info.items(), key=lambda kv: kv[1], reverse=False)
['def', 'get_profile_info', '(', 'self', ')', ':', 'return', 'sorted', '(', 'self', '.', 'profile_info', '.', 'items', '(', ')', ',', 'key', '=', 'lambda', 'kv', ':', 'kv', '[', '1', ']', ',', 'reverse', '=', 'False', ')']
Show the accumulated results of how many times each rule was used
['Show', 'the', 'accumulated', 'results', 'of', 'how', 'many', 'times', 'each', 'rule', 'was', 'used']
train
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/spark_parser/spark.py#L983-L988
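The record above is essentially a dict sort by value; a tiny stand-alone illustration with invented rule names (the real profile_info maps grammar rules to hit counts).

profile_info = {"stmt ::= expr": 3, "expr ::= expr ADD term": 12, "term ::= factor": 30}
print(sorted(profile_info.items(), key=lambda kv: kv[1]))
# [('stmt ::= expr', 3), ('expr ::= expr ADD term', 12), ('term ::= factor', 30)]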
97
scivision/pymap3d
pymap3d/sidereal.py
juliandate
def juliandate(time: datetime) -> float: """ Python datetime to Julian time from D.Vallado Fundamentals of Astrodynamics and Applications p.187 and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61 Parameters ---------- time : datetime.datetime time to convert Results ------- jd : float Julian date """ times = np.atleast_1d(time) assert times.ndim == 1 jd = np.empty(times.size) for i, t in enumerate(times): if t.month < 3: year = t.year - 1 month = t.month + 12 else: year = t.year month = t.month A = int(year / 100.0) B = 2 - A + int(A / 4.) C = ((t.second / 60. + t.minute) / 60. + t.hour) / 24. jd[i] = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C) return jd.squeeze()
python
def juliandate(time: datetime) -> float: """ Python datetime to Julian time from D.Vallado Fundamentals of Astrodynamics and Applications p.187 and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61 Parameters ---------- time : datetime.datetime time to convert Results ------- jd : float Julian date """ times = np.atleast_1d(time) assert times.ndim == 1 jd = np.empty(times.size) for i, t in enumerate(times): if t.month < 3: year = t.year - 1 month = t.month + 12 else: year = t.year month = t.month A = int(year / 100.0) B = 2 - A + int(A / 4.) C = ((t.second / 60. + t.minute) / 60. + t.hour) / 24. jd[i] = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C) return jd.squeeze()
['def', 'juliandate', '(', 'time', ':', 'datetime', ')', '->', 'float', ':', 'times', '=', 'np', '.', 'atleast_1d', '(', 'time', ')', 'assert', 'times', '.', 'ndim', '==', '1', 'jd', '=', 'np', '.', 'empty', '(', 'times', '.', 'size', ')', 'for', 'i', ',', 't', 'in', 'enumerate', '(', 'times', ')', ':', 'if', 't', '.', 'month', '<', '3', ':', 'year', '=', 't', '.', 'year', '-', '1', 'month', '=', 't', '.', 'month', '+', '12', 'else', ':', 'year', '=', 't', '.', 'year', 'month', '=', 't', '.', 'month', 'A', '=', 'int', '(', 'year', '/', '100.0', ')', 'B', '=', '2', '-', 'A', '+', 'int', '(', 'A', '/', '4.', ')', 'C', '=', '(', '(', 't', '.', 'second', '/', '60.', '+', 't', '.', 'minute', ')', '/', '60.', '+', 't', '.', 'hour', ')', '/', '24.', 'jd', '[', 'i', ']', '=', '(', 'int', '(', '365.25', '*', '(', 'year', '+', '4716', ')', ')', '+', 'int', '(', '30.6001', '*', '(', 'month', '+', '1', ')', ')', '+', 't', '.', 'day', '+', 'B', '-', '1524.5', '+', 'C', ')', 'return', 'jd', '.', 'squeeze', '(', ')']
Python datetime to Julian time from D.Vallado Fundamentals of Astrodynamics and Applications p.187 and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61 Parameters ---------- time : datetime.datetime time to convert Results ------- jd : float Julian date
['Python', 'datetime', 'to', 'Julian', 'time']
train
https://github.com/scivision/pymap3d/blob/c9cf676594611cdb52ff7e0eca6388c80ed4f63f/pymap3d/sidereal.py#L58-L97
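A worked check of the Julian-date arithmetic in the record above, for a single datetime; 2000-01-01 12:00 UTC is the J2000.0 epoch, whose Julian date is known to be 2451545.0.

from datetime import datetime

t = datetime(2000, 1, 1, 12, 0, 0)
year, month = (t.year - 1, t.month + 12) if t.month < 3 else (t.year, t.month)
A = int(year / 100.0)
B = 2 - A + int(A / 4.0)
C = ((t.second / 60.0 + t.minute) / 60.0 + t.hour) / 24.0
jd = int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C
print(jd)   # 2451545.0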
98
Phylliade/ikpy
contrib/transformations.py
pose_to_list
def pose_to_list(pose): """ Convert a Pose or PoseStamped in Python list ((position), (quaternion)) :param pose: geometry_msgs.msg.PoseStamped or geometry_msgs.msg.Pose :return: the equivalent in list ((position), (quaternion)) """ if type(pose) == geometry_msgs.msg.PoseStamped: return [[ pose.pose.position.x, pose.pose.position.y, pose.pose.position.z ], [ pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z, pose.pose.orientation.w ]] elif type(pose) == geometry_msgs.msg.Pose: return [[pose.position.x, pose.position.y, pose.position.z], [ pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w ]] else: raise Exception("pose_to_list: parameter of type %s unexpected", str(type(pose)))
python
def pose_to_list(pose): """ Convert a Pose or PoseStamped in Python list ((position), (quaternion)) :param pose: geometry_msgs.msg.PoseStamped or geometry_msgs.msg.Pose :return: the equivalent in list ((position), (quaternion)) """ if type(pose) == geometry_msgs.msg.PoseStamped: return [[ pose.pose.position.x, pose.pose.position.y, pose.pose.position.z ], [ pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z, pose.pose.orientation.w ]] elif type(pose) == geometry_msgs.msg.Pose: return [[pose.position.x, pose.position.y, pose.position.z], [ pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w ]] else: raise Exception("pose_to_list: parameter of type %s unexpected", str(type(pose)))
['def', 'pose_to_list', '(', 'pose', ')', ':', 'if', 'type', '(', 'pose', ')', '==', 'geometry_msgs', '.', 'msg', '.', 'PoseStamped', ':', 'return', '[', '[', 'pose', '.', 'pose', '.', 'position', '.', 'x', ',', 'pose', '.', 'pose', '.', 'position', '.', 'y', ',', 'pose', '.', 'pose', '.', 'position', '.', 'z', ']', ',', '[', 'pose', '.', 'pose', '.', 'orientation', '.', 'x', ',', 'pose', '.', 'pose', '.', 'orientation', '.', 'y', ',', 'pose', '.', 'pose', '.', 'orientation', '.', 'z', ',', 'pose', '.', 'pose', '.', 'orientation', '.', 'w', ']', ']', 'elif', 'type', '(', 'pose', ')', '==', 'geometry_msgs', '.', 'msg', '.', 'Pose', ':', 'return', '[', '[', 'pose', '.', 'position', '.', 'x', ',', 'pose', '.', 'position', '.', 'y', ',', 'pose', '.', 'position', '.', 'z', ']', ',', '[', 'pose', '.', 'orientation', '.', 'x', ',', 'pose', '.', 'orientation', '.', 'y', ',', 'pose', '.', 'orientation', '.', 'z', ',', 'pose', '.', 'orientation', '.', 'w', ']', ']', 'else', ':', 'raise', 'Exception', '(', '"pose_to_list: parameter of type %s unexpected"', ',', 'str', '(', 'type', '(', 'pose', ')', ')', ')']
Convert a Pose or PoseStamped in Python list ((position), (quaternion)) :param pose: geometry_msgs.msg.PoseStamped or geometry_msgs.msg.Pose :return: the equivalent in list ((position), (quaternion))
['Convert', 'a', 'Pose', 'or', 'PoseStamped', 'in', 'Python', 'list', '((', 'position', ')', '(', 'quaternion', '))', ':', 'param', 'pose', ':', 'geometry_msgs', '.', 'msg', '.', 'PoseStamped', 'or', 'geometry_msgs', '.', 'msg', '.', 'Pose', ':', 'return', ':', 'the', 'equivalent', 'in', 'list', '((', 'position', ')', '(', 'quaternion', '))']
train
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/contrib/transformations.py#L48-L68
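A ROS-free sketch of the flattening in the record above: types.SimpleNamespace stands in for geometry_msgs.msg.Pose (assuming, as the record does, .position.{x,y,z} and .orientation.{x,y,z,w} attributes), so the example runs without ROS installed; the numeric values are made up.

from types import SimpleNamespace

pose = SimpleNamespace(
    position=SimpleNamespace(x=1.0, y=2.0, z=0.5),
    orientation=SimpleNamespace(x=0.0, y=0.0, z=0.0, w=1.0),   # identity rotation quaternion
)
flat = [[pose.position.x, pose.position.y, pose.position.z],
        [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]]
print(flat)   # [[1.0, 2.0, 0.5], [0.0, 0.0, 0.0, 1.0]]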
99
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/device_directory/device_directory.py
DeviceDirectoryAPI.list_device_events
def list_device_events(self, **kwargs): """List all device logs. :param int limit: The number of logs to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get logs after/starting at given `device_event_id` :param dict filters: Dictionary of filters to apply. :return: list of :py:class:`DeviceEvent` objects :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, DeviceEvent, True) api = self._get_api(device_directory.DefaultApi) return PaginatedResponse(api.device_log_list, lwrap_type=DeviceEvent, **kwargs)
python
def list_device_events(self, **kwargs): """List all device logs. :param int limit: The number of logs to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get logs after/starting at given `device_event_id` :param dict filters: Dictionary of filters to apply. :return: list of :py:class:`DeviceEvent` objects :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, DeviceEvent, True) api = self._get_api(device_directory.DefaultApi) return PaginatedResponse(api.device_log_list, lwrap_type=DeviceEvent, **kwargs)
['def', 'list_device_events', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '=', 'self', '.', '_verify_sort_options', '(', 'kwargs', ')', 'kwargs', '=', 'self', '.', '_verify_filters', '(', 'kwargs', ',', 'DeviceEvent', ',', 'True', ')', 'api', '=', 'self', '.', '_get_api', '(', 'device_directory', '.', 'DefaultApi', ')', 'return', 'PaginatedResponse', '(', 'api', '.', 'device_log_list', ',', 'lwrap_type', '=', 'DeviceEvent', ',', '*', '*', 'kwargs', ')']
List all device logs. :param int limit: The number of logs to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get logs after/starting at given `device_event_id` :param dict filters: Dictionary of filters to apply. :return: list of :py:class:`DeviceEvent` objects :rtype: PaginatedResponse
['List', 'all', 'device', 'logs', '.']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/device_directory/device_directory.py#L290-L305