Dataset Preview (37.8 MB)
Columns: Unnamed: 0 (int64), repository_name (string), func_path_in_repository (string), func_name (string), whole_func_string (string), language (string), func_code_string (string), func_code_tokens (string), func_documentation_string (string), func_documentation_tokens (string), split_name (string), func_code_url (string)
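Each row pairs a Python function's source with its docstring, token lists, a split label, and a GitHub permalink. As an informal sketch only (field names are the columns above, values abridged from the first row shown below), one record can be pictured as a Python mapping:

record = {
    "Unnamed: 0": 0,
    "repository_name": "getsentry/libsourcemap",
    "func_path_in_repository": "libsourcemap/highlevel.py",
    "func_name": "View.get_original_function_name",
    "whole_func_string": "def get_original_function_name(self, line, col, ...): ...",  # full source, abridged here
    "language": "python",
    "func_code_string": "def get_original_function_name(self, line, col, ...): ...",  # identical to whole_func_string in the rows shown
    "func_code_tokens": ["def", "get_original_function_name", "(", "self", "..."],  # stored as a stringified list in the preview
    "func_documentation_string": "Given a token location and a minified function name ...",
    "func_documentation_tokens": ["Given", "a", "token", "location", "..."],
    "split_name": "train",
    "func_code_url": "https://github.com/getsentry/libsourcemap/blob/94b5a34814fafee9dc23da8ec0ccca77f30e3370/libsourcemap/highlevel.py#L163-L185",
}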
0
"getsentry/libsourcemap"
"libsourcemap/highlevel.py"
"View.get_original_function_name"
"def get_original_function_name(self, line, col, minified_name, minified_source): """Given a token location and a minified function name and the minified source file this returns the original function name if it can be found of the minified function in scope. """ # Silently ignore underflows if line < 0 or col < 0: return None minified_name = minified_name.encode('utf-8') sout = _ffi.new('const char **') try: slen = rustcall(_lib.lsm_view_get_original_function_name, self._get_ptr(), line, col, minified_name, minified_source, sout) if slen > 0: return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace') except SourceMapError: # In some rare cases the library is/was known to panic. We do # not want to report this upwards (this happens on slicing # out of range on older rust versions in the rust-sourcemap # library) pass"
"python"
"def get_original_function_name(self, line, col, minified_name, minified_source): """Given a token location and a minified function name and the minified source file this returns the original function name if it can be found of the minified function in scope. """ # Silently ignore underflows if line < 0 or col < 0: return None minified_name = minified_name.encode('utf-8') sout = _ffi.new('const char **') try: slen = rustcall(_lib.lsm_view_get_original_function_name, self._get_ptr(), line, col, minified_name, minified_source, sout) if slen > 0: return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace') except SourceMapError: # In some rare cases the library is/was known to panic. We do # not want to report this upwards (this happens on slicing # out of range on older rust versions in the rust-sourcemap # library) pass"
"['def', 'get_original_function_name', '(', 'self', ',', 'line', ',', 'col', ',', 'minified_name', ',', 'minified_source', ')', ':', '# Silently ignore underflows', 'if', 'line', '<', '0', 'or', 'col', '<', '0', ':', 'return', 'None', 'minified_name', '=', 'minified_name', '.', 'encode', '(', "'utf-8'", ')', 'sout', '=', '_ffi', '.', 'new', '(', "'const char **'", ')', 'try', ':', 'slen', '=', 'rustcall', '(', '_lib', '.', 'lsm_view_get_original_function_name', ',', 'self', '.', '_get_ptr', '(', ')', ',', 'line', ',', 'col', ',', 'minified_name', ',', 'minified_source', ',', 'sout', ')', 'if', 'slen', '>', '0', ':', 'return', '_ffi', '.', 'unpack', '(', 'sout', '[', '0', ']', ',', 'slen', ')', '.', 'decode', '(', "'utf-8'", ',', "'replace'", ')', 'except', 'SourceMapError', ':', '# In some rare cases the library is/was known to panic. We do', '# not want to report this upwards (this happens on slicing', '# out of range on older rust versions in the rust-sourcemap', '# library)', 'pass']"
"Given a token location and a minified function name and the minified source file this returns the original function name if it can be found of the minified function in scope."
"['Given', 'a', 'token', 'location', 'and', 'a', 'minified', 'function', 'name', 'and', 'the', 'minified', 'source', 'file', 'this', 'returns', 'the', 'original', 'function', 'name', 'if', 'it', 'can', 'be', 'found', 'of', 'the', 'minified', 'function', 'in', 'scope', '.']"
"train"
"https://github.com/getsentry/libsourcemap/blob/94b5a34814fafee9dc23da8ec0ccca77f30e3370/libsourcemap/highlevel.py#L163-L185"
1
"brocade/pynos"
"pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py"
"brocade_mac_address_table.get_mac_address_table_input_request_type_get_interface_based_request_mac_type"
"def get_mac_address_table_input_request_type_get_interface_based_request_mac_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_mac_address_table = ET.Element("get_mac_address_table") config = get_mac_address_table input = ET.SubElement(get_mac_address_table, "input") request_type = ET.SubElement(input, "request-type") get_interface_based_request = ET.SubElement(request_type, "get-interface-based-request") mac_type = ET.SubElement(get_interface_based_request, "mac-type") mac_type.text = kwargs.pop('mac_type') callback = kwargs.pop('callback', self._callback) return callback(config)"
"python"
"def get_mac_address_table_input_request_type_get_interface_based_request_mac_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_mac_address_table = ET.Element("get_mac_address_table") config = get_mac_address_table input = ET.SubElement(get_mac_address_table, "input") request_type = ET.SubElement(input, "request-type") get_interface_based_request = ET.SubElement(request_type, "get-interface-based-request") mac_type = ET.SubElement(get_interface_based_request, "mac-type") mac_type.text = kwargs.pop('mac_type') callback = kwargs.pop('callback', self._callback) return callback(config)"
"['def', 'get_mac_address_table_input_request_type_get_interface_based_request_mac_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_mac_address_table', '=', 'ET', '.', 'Element', '(', '"get_mac_address_table"', ')', 'config', '=', 'get_mac_address_table', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_mac_address_table', ',', '"input"', ')', 'request_type', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"request-type"', ')', 'get_interface_based_request', '=', 'ET', '.', 'SubElement', '(', 'request_type', ',', '"get-interface-based-request"', ')', 'mac_type', '=', 'ET', '.', 'SubElement', '(', 'get_interface_based_request', ',', '"mac-type"', ')', 'mac_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'mac_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']"
"Auto Generated Code"
"['Auto', 'Generated', 'Code']"
"train"
"https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py#L297-L310"
2
"nicolargo/glances"
"glances/plugins/glances_memswap.py"
"Plugin.update_views"
"def update_views(self): """Update stats views.""" # Call the father's method super(Plugin, self).update_views() # Add specifics informations # Alert and log self.views['used']['decoration'] = self.get_alert_log(self.stats['used'], maximum=self.stats['total'])"
"python"
"def update_views(self): """Update stats views.""" # Call the father's method super(Plugin, self).update_views() # Add specifics informations # Alert and log self.views['used']['decoration'] = self.get_alert_log(self.stats['used'], maximum=self.stats['total'])"
"['def', 'update_views', '(', 'self', ')', ':', "# Call the father's method", 'super', '(', 'Plugin', ',', 'self', ')', '.', 'update_views', '(', ')', '# Add specifics informations', '# Alert and log', 'self', '.', 'views', '[', "'used'", ']', '[', "'decoration'", ']', '=', 'self', '.', 'get_alert_log', '(', 'self', '.', 'stats', '[', "'used'", ']', ',', 'maximum', '=', 'self', '.', 'stats', '[', "'total'", ']', ')']"
"Update stats views."
"['Update', 'stats', 'views', '.']"
"train"
"https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_memswap.py#L130-L137"
3
"DLR-RM/RAFCON"
"source/rafcon/gui/helpers/meta_data.py"
"contains_geometric_info"
"def contains_geometric_info(var): """ Check whether the passed variable is a tuple with two floats or integers """ return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var)"
"python"
"def contains_geometric_info(var): """ Check whether the passed variable is a tuple with two floats or integers """ return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var)"
"['def', 'contains_geometric_info', '(', 'var', ')', ':', 'return', 'isinstance', '(', 'var', ',', 'tuple', ')', 'and', 'len', '(', 'var', ')', '==', '2', 'and', 'all', '(', 'isinstance', '(', 'val', ',', '(', 'int', ',', 'float', ')', ')', 'for', 'val', 'in', 'var', ')']"
"Check whether the passed variable is a tuple with two floats or integers"
"['Check', 'whether', 'the', 'passed', 'variable', 'is', 'a', 'tuple', 'with', 'two', 'floats', 'or', 'integers']"
"train"
"https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/meta_data.py#L55-L57"
4
"ejeschke/ginga"
"ginga/gtk3w/ImageViewGtk.py"
"ImageViewGtk.save_plain_image_as_file"
"def save_plain_image_as_file(self, filepath, format='png', quality=90): """Used for generating thumbnails. Does not include overlaid graphics. """ pixbuf = self.get_plain_image_as_pixbuf() options, values = [], [] if format == 'jpeg': options.append('quality') values.append(str(quality)) pixbuf.savev(filepath, format, options, values)"
"python"
"def save_plain_image_as_file(self, filepath, format='png', quality=90): """Used for generating thumbnails. Does not include overlaid graphics. """ pixbuf = self.get_plain_image_as_pixbuf() options, values = [], [] if format == 'jpeg': options.append('quality') values.append(str(quality)) pixbuf.savev(filepath, format, options, values)"
"['def', 'save_plain_image_as_file', '(', 'self', ',', 'filepath', ',', 'format', '=', "'png'", ',', 'quality', '=', '90', ')', ':', 'pixbuf', '=', 'self', '.', 'get_plain_image_as_pixbuf', '(', ')', 'options', ',', 'values', '=', '[', ']', ',', '[', ']', 'if', 'format', '==', "'jpeg'", ':', 'options', '.', 'append', '(', "'quality'", ')', 'values', '.', 'append', '(', 'str', '(', 'quality', ')', ')', 'pixbuf', '.', 'savev', '(', 'filepath', ',', 'format', ',', 'options', ',', 'values', ')']"
"Used for generating thumbnails. Does not include overlaid graphics."
"['Used', 'for', 'generating', 'thumbnails', '.', 'Does', 'not', 'include', 'overlaid', 'graphics', '.']"
"train"
"https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/gtk3w/ImageViewGtk.py#L75-L84"
5
"poppy-project/pypot"
"pypot/vrep/remoteApiBindings/vrep.py"
"simxClearFloatSignal"
"def simxClearFloatSignal(clientID, signalName, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if (sys.version_info[0] == 3) and (type(signalName) is str): signalName=signalName.encode('utf-8') return c_ClearFloatSignal(clientID, signalName, operationMode)"
"python"
"def simxClearFloatSignal(clientID, signalName, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if (sys.version_info[0] == 3) and (type(signalName) is str): signalName=signalName.encode('utf-8') return c_ClearFloatSignal(clientID, signalName, operationMode)"
"['def', 'simxClearFloatSignal', '(', 'clientID', ',', 'signalName', ',', 'operationMode', ')', ':', 'if', '(', 'sys', '.', 'version_info', '[', '0', ']', '==', '3', ')', 'and', '(', 'type', '(', 'signalName', ')', 'is', 'str', ')', ':', 'signalName', '=', 'signalName', '.', 'encode', '(', "'utf-8'", ')', 'return', 'c_ClearFloatSignal', '(', 'clientID', ',', 'signalName', ',', 'operationMode', ')']"
"Please have a look at the function description/documentation in the V-REP user manual"
"['Please', 'have', 'a', 'look', 'at', 'the', 'function', 'description', '/', 'documentation', 'in', 'the', 'V', '-', 'REP', 'user', 'manual']"
"train"
"https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L900-L907"
6
"cloudify-cosmo/repex"
"repex.py"
"Repex.find_matches"
"def find_matches(self, content, file_to_handle): """Find all matches of an expression in a file """ # look for all match groups in the content groups = [match.groupdict() for match in self.match_expression.finditer(content)] # filter out content not in the matchgroup matches = [group['matchgroup'] for group in groups if group.get('matchgroup')] logger.info('Found %s matches in %s', len(matches), file_to_handle) # We only need the unique strings found as we'll be replacing each # of them. No need to replace the ones already replaced. return list(set(matches))"
"python"
"def find_matches(self, content, file_to_handle): """Find all matches of an expression in a file """ # look for all match groups in the content groups = [match.groupdict() for match in self.match_expression.finditer(content)] # filter out content not in the matchgroup matches = [group['matchgroup'] for group in groups if group.get('matchgroup')] logger.info('Found %s matches in %s', len(matches), file_to_handle) # We only need the unique strings found as we'll be replacing each # of them. No need to replace the ones already replaced. return list(set(matches))"
"['def', 'find_matches', '(', 'self', ',', 'content', ',', 'file_to_handle', ')', ':', '# look for all match groups in the content', 'groups', '=', '[', 'match', '.', 'groupdict', '(', ')', 'for', 'match', 'in', 'self', '.', 'match_expression', '.', 'finditer', '(', 'content', ')', ']', '# filter out content not in the matchgroup', 'matches', '=', '[', 'group', '[', "'matchgroup'", ']', 'for', 'group', 'in', 'groups', 'if', 'group', '.', 'get', '(', "'matchgroup'", ')', ']', 'logger', '.', 'info', '(', "'Found %s matches in %s'", ',', 'len', '(', 'matches', ')', ',', 'file_to_handle', ')', "# We only need the unique strings found as we'll be replacing each", '# of them. No need to replace the ones already replaced.', 'return', 'list', '(', 'set', '(', 'matches', ')', ')']"
"Find all matches of an expression in a file"
"['Find', 'all', 'matches', 'of', 'an', 'expression', 'in', 'a', 'file']"
"train"
"https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L605-L618"
7
"mariocj89/github-token"
"github_token/__init__.py"
"TokenFactory.create"
"def create(self): """Creates a token It uses the app_name as the notes and the scopes are the permissions required by the application. See those in github when configuring an app token Raises a TFARequired if a two factor is required after the atempt to create it without having call tfa before """ headers = dict() if self.tfa_token: headers["X-GitHub-OTP"] = self.tfa_token token_name = self.app_name + platform.node() # node specific in case the user has multiple hosts payload = dict(note=token_name, scopes=self.scopes) response = requests.post( self.api_url + "authorizations", auth=(self.user, self.password), headers=headers, json=payload ) if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""): raise TFARequired("TFA required for the user") if response.status_code == 422: raise AlreadyExistsError("APP already exists. Please delete {} token".format(token_name)) if response.status_code == 401: raise BadPassword("Bad User/Password") response.raise_for_status() return response.json()["token"]"
"python"
"def create(self): """Creates a token It uses the app_name as the notes and the scopes are the permissions required by the application. See those in github when configuring an app token Raises a TFARequired if a two factor is required after the atempt to create it without having call tfa before """ headers = dict() if self.tfa_token: headers["X-GitHub-OTP"] = self.tfa_token token_name = self.app_name + platform.node() # node specific in case the user has multiple hosts payload = dict(note=token_name, scopes=self.scopes) response = requests.post( self.api_url + "authorizations", auth=(self.user, self.password), headers=headers, json=payload ) if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""): raise TFARequired("TFA required for the user") if response.status_code == 422: raise AlreadyExistsError("APP already exists. Please delete {} token".format(token_name)) if response.status_code == 401: raise BadPassword("Bad User/Password") response.raise_for_status() return response.json()["token"]"
"['def', 'create', '(', 'self', ')', ':', 'headers', '=', 'dict', '(', ')', 'if', 'self', '.', 'tfa_token', ':', 'headers', '[', '"X-GitHub-OTP"', ']', '=', 'self', '.', 'tfa_token', 'token_name', '=', 'self', '.', 'app_name', '+', 'platform', '.', 'node', '(', ')', '# node specific in case the user has multiple hosts', 'payload', '=', 'dict', '(', 'note', '=', 'token_name', ',', 'scopes', '=', 'self', '.', 'scopes', ')', 'response', '=', 'requests', '.', 'post', '(', 'self', '.', 'api_url', '+', '"authorizations"', ',', 'auth', '=', '(', 'self', '.', 'user', ',', 'self', '.', 'password', ')', ',', 'headers', '=', 'headers', ',', 'json', '=', 'payload', ')', 'if', 'response', '.', 'status_code', '==', '401', 'and', '"required"', 'in', 'response', '.', 'headers', '.', 'get', '(', '"X-GitHub-OTP"', ',', '""', ')', ':', 'raise', 'TFARequired', '(', '"TFA required for the user"', ')', 'if', 'response', '.', 'status_code', '==', '422', ':', 'raise', 'AlreadyExistsError', '(', '"APP already exists. Please delete {} token"', '.', 'format', '(', 'token_name', ')', ')', 'if', 'response', '.', 'status_code', '==', '401', ':', 'raise', 'BadPassword', '(', '"Bad User/Password"', ')', 'response', '.', 'raise_for_status', '(', ')', 'return', 'response', '.', 'json', '(', ')', '[', '"token"', ']']"
"Creates a token It uses the app_name as the notes and the scopes are the permissions required by the application. See those in github when configuring an app token Raises a TFARequired if a two factor is required after the atempt to create it without having call tfa before"
"['Creates', 'a', 'token']"
"train"
"https://github.com/mariocj89/github-token/blob/8ca85fa51a52aef94cfb4f851eb229ee500bc28f/github_token/__init__.py#L81-L108"
8
"brycedrennan/eulerian-magnification"
"eulerian_magnification/base.py"
"combine_pyramid_and_save"
"def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'): """Combine a gaussian video representation with the original and save to file""" width, height = get_frame_dimensions(orig_video[0]) fourcc = cv2.VideoWriter_fourcc(*'MJPG') print("Outputting to %s" % save_filename) writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1) for x in range(0, g_video.shape[0]): img = np.ndarray(shape=g_video[x].shape, dtype='float') img[:] = g_video[x] for i in range(enlarge_multiple): img = cv2.pyrUp(img) img[:height, :width] = img[:height, :width] + orig_video[x] res = cv2.convertScaleAbs(img[:height, :width]) writer.write(res)"
"python"
"def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'): """Combine a gaussian video representation with the original and save to file""" width, height = get_frame_dimensions(orig_video[0]) fourcc = cv2.VideoWriter_fourcc(*'MJPG') print("Outputting to %s" % save_filename) writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1) for x in range(0, g_video.shape[0]): img = np.ndarray(shape=g_video[x].shape, dtype='float') img[:] = g_video[x] for i in range(enlarge_multiple): img = cv2.pyrUp(img) img[:height, :width] = img[:height, :width] + orig_video[x] res = cv2.convertScaleAbs(img[:height, :width]) writer.write(res)"
"['def', 'combine_pyramid_and_save', '(', 'g_video', ',', 'orig_video', ',', 'enlarge_multiple', ',', 'fps', ',', 'save_filename', '=', "'media/output.avi'", ')', ':', 'width', ',', 'height', '=', 'get_frame_dimensions', '(', 'orig_video', '[', '0', ']', ')', 'fourcc', '=', 'cv2', '.', 'VideoWriter_fourcc', '(', '*', "'MJPG'", ')', 'print', '(', '"Outputting to %s"', '%', 'save_filename', ')', 'writer', '=', 'cv2', '.', 'VideoWriter', '(', 'save_filename', ',', 'fourcc', ',', 'fps', ',', '(', 'width', ',', 'height', ')', ',', '1', ')', 'for', 'x', 'in', 'range', '(', '0', ',', 'g_video', '.', 'shape', '[', '0', ']', ')', ':', 'img', '=', 'np', '.', 'ndarray', '(', 'shape', '=', 'g_video', '[', 'x', ']', '.', 'shape', ',', 'dtype', '=', "'float'", ')', 'img', '[', ':', ']', '=', 'g_video', '[', 'x', ']', 'for', 'i', 'in', 'range', '(', 'enlarge_multiple', ')', ':', 'img', '=', 'cv2', '.', 'pyrUp', '(', 'img', ')', 'img', '[', ':', 'height', ',', ':', 'width', ']', '=', 'img', '[', ':', 'height', ',', ':', 'width', ']', '+', 'orig_video', '[', 'x', ']', 'res', '=', 'cv2', '.', 'convertScaleAbs', '(', 'img', '[', ':', 'height', ',', ':', 'width', ']', ')', 'writer', '.', 'write', '(', 'res', ')']"
"Combine a gaussian video representation with the original and save to file"
"['Combine', 'a', 'gaussian', 'video', 'representation', 'with', 'the', 'original', 'and', 'save', 'to', 'file']"
"train"
"https://github.com/brycedrennan/eulerian-magnification/blob/9ae0651fe3334176300d183f8240ad36d77759a9/eulerian_magnification/base.py#L108-L122"
9
"PmagPy/PmagPy"
"pmagpy/pmag.py"
"PintPars"
"def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs): """ calculate the paleointensity magic parameters make some definitions """ if 'version' in list(kwargs.keys()) and kwargs['version'] == 3: meth_key = 'method_codes' beta_key = 'int_b_beta' temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max' dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi' # convert dataframe to list of dictionaries datablock = datablock.to_dict('records') z_key = 'int_z' drats_key = 'int_drats' drat_key = 'int_drat' md_key = 'int_md' dec_key = 'dir_dec' inc_key = 'dir_inc' mad_key = 'int_mad_free' dang_key = 'int_dang' ptrm_key = 'int_n_ptrm' theta_key = 'int_theta' gamma_key = 'int_gamma' delta_key = 'int_delta' frac_key = 'int_frac' gmax_key = 'int_gmax' scat_key = 'int_scat' else: beta_key = 'specimen_b_beta' meth_key = 'magic_method_codes' temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max' z_key = 'specimen_z' drats_key = 'specimen_drats' drat_key = 'specimen_drat' md_key = 'specimen_md' dec_key = 'specimen_dec' inc_key = 'specimen_inc' mad_key = 'specimen_int_mad' dang_key = 'specimen_dang' ptrm_key = 'specimen_int_ptrm_n' theta_key = 'specimen_theta' gamma_key = 'specimen_gamma' delta_key = 'specimen_delta' frac_key = 'specimen_frac' gmax_key = 'specimen_gmax' scat_key = 'specimen_scat' first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], [] methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", "" zptrm_check = [] first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[ 0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5] if len(araiblock) > 6: # used only for perpendicular method of paleointensity ThetaChecks = araiblock[6] # used only for perpendicular method of paleointensity DeltaChecks = araiblock[7] xi, yi, diffcum = [], [], 0 xiz, xzi, yiz, yzi = [], [], [], [] Nptrm, dmax = 0, -1e-22 # check if even zero and infield steps if len(first_Z) > len(first_I): maxe = len(first_I) - 1 else: maxe = len(first_Z) - 1 if end == 0 or end > maxe: end = maxe # get the MAD, DANG, etc. for directional data bstep = araiblock[0][start][0] estep = araiblock[0][end][0] zstart, zend = 0, len(zijdblock) for k in range(len(zijdblock)): zrec = zijdblock[k] if zrec[0] == bstep: zstart = k if zrec[0] == estep: zend = k PCA = domean(zijdblock, zstart, zend, 'DE-BFL') D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz for rec in zijdblock: D.append((rec[1], rec[2], rec[3])) Du.append((rec[1], rec[2])) if rec[4] == 1: Dzi.append((rec[1], rec[2])) # if this is ZI step else: Diz.append((rec[1], rec[2])) # if this is IZ step # calculate the vector difference sum vds = dovds(D) b_zi, b_iz = [], [] # collect data included in ZigZag calculation if end + 1 >= len(first_Z): stop = end - 1 else: stop = end for k in range(start, end + 1): for l in range(len(first_I)): irec = first_I[l] if irec[0] == first_Z[k][0]: xi.append(irec[3]) yi.append(first_Z[k][3]) pars, errcode = int_pars(xi, yi, vds) if errcode == 1: return pars, errcode # for k in range(start,end+1): for k in range(len(first_Z) - 1): for l in range(k): # only go down to 10% of NRM..... 
if old_div(first_Z[k][3], vds) > 0.1: irec = first_I[l] if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step xzi = irec[3] yzi = first_Z[k][3] xiz = first_I[l + 1][3] yiz = first_Z[k + 1][3] slope = np.arctan2((yzi - yiz), (xiz - xzi)) r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2) if r > .1 * vds: b_zi.append(slope) # suppress noise elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step xiz = irec[3] yiz = first_Z[k][3] xzi = first_I[l + 1][3] yzi = first_Z[k + 1][3] slope = np.arctan2((yiz - yzi), (xzi - xiz)) r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2) if r > .1 * vds: b_iz.append(slope) # suppress noise # ZigZag, Frat, Trat = -1, 0, 0 if len(Diz) > 2 and len(Dzi) > 2: ZigZag = 0 dizp = fisher_mean(Diz) # get Fisher stats on IZ steps dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps dup = fisher_mean(Du) # get Fisher stats on all steps # # if directions are TOO well grouped, can get false positive for ftest, so # angles must be > 3 degrees apart. # if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.: F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \ (dup['n'] - dzip['r'] - dizp['r'] ) # Watson test for common mean nf = 2. * (dup['n'] - 2.) # number of degees of freedom ftest = fcalc(2, nf) Frat = old_div(F, ftest) if Frat > 1.: ZigZag = Frat # fails zigzag on directions methcode = "SM-FTEST" # now do slopes if len(b_zi) > 2 and len(b_iz) > 2: bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev biz_m, biz_sig = gausspars(b_iz) n_zi = float(len(b_zi)) n_iz = float(len(b_iz)) b_diff = abs(bzi_m - biz_m) # difference in means # # avoid false positives - set 3 degree slope difference here too if b_diff > 3 * np.pi / 180.: nf = n_zi + n_iz - 2. # degrees of freedom svar = old_div(((n_zi - 1.) * bzi_sig**2 + (n_iz - 1.) * biz_sig**2), nf) T = old_div((b_diff), np.sqrt( svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t ttest = tcalc(nf, .05) # t-test at 95% conf. 
Trat = old_div(T, ttest) if Trat > 1 and Trat > Frat: ZigZag = Trat # fails zigzag on directions methcode = "SM-TTEST" pars[z_key] = ZigZag pars[meth_key] = methcode # do drats if len(ptrm_check) != 0: diffcum, drat_max = 0, 0 for prec in ptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 2 # don't count alteration that happens after this step if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] if abs(prec[3] - irec[3]) > drat_max: drat_max = abs(prec[3] - irec[3]) pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3]) elif len(zptrm_check) != 0: diffcum = 0 for prec in zptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 1 if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) else: pars[drats_key] = -1 pars[drat_key] = -1 # and the pTRM tails if len(ptrm_tail) != 0: for trec in ptrm_tail: step = trec[0] for irec in first_I: if irec[0] == step: break if abs(trec[3]) > dmax: dmax = abs(trec[3]) pars[md_key] = (100 * dmax / vds) else: pars[md_key] = -1 pars[min_key] = bstep pars[max_key] = estep pars[dec_key] = PCA["specimen_dec"] pars[inc_key] = PCA["specimen_inc"] pars[mad_key] = PCA["specimen_mad"] pars[dang_key] = PCA["specimen_dang"] pars[ptrm_key] = Nptrm # and the ThetaChecks if ThetaChecks != "": t = 0 for theta in ThetaChecks: if theta[0] >= bstep and theta[0] <= estep and theta[1] > t: t = theta[1] pars[theta_key] = t else: pars[theta_key] = -1 # and the DeltaChecks if DeltaChecks != "": d = 0 for delta in DeltaChecks: if delta[0] >= bstep and delta[0] <= estep and delta[1] > d: d = delta[1] pars[delta_key] else: pars[delta_key] = -1 pars[gamma_key] = -1 if GammaChecks != "": for gamma in GammaChecks: if gamma[0] <= estep: pars['specimen_gamma'] = gamma[1] # -------------------------------------------------------------- # From here added By Ron Shaar 11-Dec 2012 # New parameters defined in Shaar and Tauxe (2012): # FRAC (specimen_frac) - ranges from 0. to 1. # SCAT (specimen_scat) - takes 1/0 # gap_max (specimen_gmax) - ranges from 0. to 1. # -------------------------------------------------------------- # -------------------------------------------------------------- # FRAC is similar to Fvds, but the numerator is the vds fraction: # FRAC= [ vds (start,end)] / total vds ] # gap_max= max [ (vector difference) / vds (start,end)] # -------------------------------------------------------------- # collect all zijderveld data to arrays and calculate VDS z_temperatures = [row[0] for row in zijdblock] zdata = [] # array of zero-fields measurements in Cartezian coordinates # array of vector differences (for vds calculation) vector_diffs = [] NRM = zijdblock[0][3] # NRM for k in range(len(zijdblock)): DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)] cart = dir2cart(DIR) zdata.append(np.array([cart[0], cart[1], cart[2]])) if k > 0: vector_diffs.append( np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2))) # last vector difference: from the last point to the origin. 
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2))) vds = sum(vector_diffs) # vds calculation zdata = np.array(zdata) vector_diffs = np.array(vector_diffs) # calculate the vds within the chosen segment vector_diffs_segment = vector_diffs[zstart:zend] # FRAC calculation FRAC = old_div(sum(vector_diffs_segment), vds) pars[frac_key] = FRAC # gap_max calculation max_FRAC_gap = max( old_div(vector_diffs_segment, sum(vector_diffs_segment))) pars[gmax_key] = max_FRAC_gap # --------------------------------------------------------------------- # Calculate the "scat box" # all data-points, pTRM checks, and tail-checks, should be inside a "scat box" # --------------------------------------------------------------------- # intialization # fail scat due to arai plot data points pars["fail_arai_beta_box_scatter"] = False pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks pars[scat_key] = "t" # Pass by default # -------------------------------------------------------------- # collect all Arai plot data points in arrays x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], [] NRMs = araiblock[0] PTRMs = araiblock[1] ptrm_checks = araiblock[2] ptrm_tail = araiblock[3] PTRMs_temperatures = [row[0] for row in PTRMs] NRMs_temperatures = [row[0] for row in NRMs] NRM = NRMs[0][3] for k in range(len(NRMs)): index_pTRMs = PTRMs_temperatures.index(NRMs[k][0]) x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM)) y_Arai.append(old_div(NRMs[k][3], NRM)) t_Arai.append(NRMs[k][0]) if NRMs[k][4] == 1: steps_Arai.append('ZI') else: steps_Arai.append('IZ') x_Arai = np.array(x_Arai) y_Arai = np.array(y_Arai) # -------------------------------------------------------------- # collect all pTRM check to arrays x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], [] x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_checks)): if ptrm_checks[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_ptrm_check_starting_point.append(x_Arai[index]) y_ptrm_check_starting_point.append(y_Arai[index]) ptrm_checks_starting_temperatures.append( starting_temperature) index_zerofield = zerofield_temperatures.index( ptrm_checks[k][0]) x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM)) y_ptrm_check.append( old_div(zerofields[index_zerofield][3], NRM)) ptrm_checks_temperatures.append(ptrm_checks[k][0]) break except: pass x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point) y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point) ptrm_checks_starting_temperatures = np.array( ptrm_checks_starting_temperatures) x_ptrm_check = np.array(x_ptrm_check) y_ptrm_check = np.array(y_ptrm_check) ptrm_checks_temperatures = np.array(ptrm_checks_temperatures) # -------------------------------------------------------------- # collect tail checks to arrays x_tail_check, y_tail_check, tail_check_temperatures = [], [], [] x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_tail)): if ptrm_tail[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if 
"LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_tail_check_starting_point.append(x_Arai[index]) y_tail_check_starting_point.append(y_Arai[index]) tail_checks_starting_temperatures.append( starting_temperature) index_infield = infield_temperatures.index( ptrm_tail[k][0]) x_tail_check.append( old_div(infields[index_infield][3], NRM)) y_tail_check.append( old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM)) tail_check_temperatures.append(ptrm_tail[k][0]) break except: pass x_tail_check = np.array(x_tail_check) y_tail_check = np.array(y_tail_check) tail_check_temperatures = np.array(tail_check_temperatures) x_tail_check_starting_point = np.array(x_tail_check_starting_point) y_tail_check_starting_point = np.array(y_tail_check_starting_point) tail_checks_starting_temperatures = np.array( tail_checks_starting_temperatures) # -------------------------------------------------------------- # collect the chosen segment in the Arai plot to arrays x_Arai_segment = x_Arai[start:end + 1] # chosen segent in the Arai plot y_Arai_segment = y_Arai[start:end + 1] # chosen segent in the Arai plot # -------------------------------------------------------------- # collect pTRM checks in segment to arrays # notice, this is different than the conventional DRATS. # for scat calculation we take only the pTRM checks which were carried out # before reaching the highest temperature in the chosen segment x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], [] for k in range(len(ptrm_checks_temperatures)): if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures <= pars[max_key]: x_ptrm_check_for_SCAT.append(x_ptrm_check[k]) y_ptrm_check_for_SCAT.append(y_ptrm_check[k]) x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT) y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT) # -------------------------------------------------------------- # collect Tail checks in segment to arrays # for scat calculation we take only the tail checks which were carried out # before reaching the highest temperature in the chosen segment x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], [] for k in range(len(tail_check_temperatures)): if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]: x_tail_check_for_SCAT.append(x_tail_check[k]) y_tail_check_for_SCAT.append(y_tail_check[k]) x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT) y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT) # -------------------------------------------------------------- # calculate the lines that define the scat box: # if threshold value for beta is not defined, then scat cannot be calculated (pass) # in this case, scat pass if beta_key in list(accept.keys()) and accept[beta_key] != "": b_beta_threshold = float(accept[beta_key]) b = pars[b_key] # best fit line cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass a = cm_y - b * cm_x # lines with slope = slope +/- 2*(specimen_b_beta) two_sigma_beta_threshold = 2 * b_beta_threshold two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b) # a line with a shallower slope (b + 2*beta*b) passing through the center of mass # y=a1+b1x b1 = b + two_sigma_slope_threshold a1 = cm_y - b1 * cm_x # bounding line with steeper slope (b - 2*beta*b) passing through the center of mass # y=a2+b2x b2 = b - 
two_sigma_slope_threshold a2 = cm_y - b2 * cm_x # lower bounding line of the 'beta box' # y=intercept1+slop1x slop1 = old_div(a1, ((old_div(a2, b2)))) intercept1 = a1 # higher bounding line of the 'beta box' # y=intercept2+slop2x slop2 = old_div(a2, ((old_div(a1, b1)))) intercept2 = a2 pars['specimen_scat_bounding_line_high'] = [intercept2, slop2] pars['specimen_scat_bounding_line_low'] = [intercept1, slop1] # -------------------------------------------------------------- # check if the Arai data points are in the 'box' # the two bounding lines ymin = intercept1 + x_Arai_segment * slop1 ymax = intercept2 + x_Arai_segment * slop2 # arrays of "True" or "False" check_1 = y_Arai_segment > ymax check_2 = y_Arai_segment < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_arai_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the pTRM checks data points are in the 'box' if len(x_ptrm_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_ptrm_check_for_SCAT * slop1 ymax = intercept2 + x_ptrm_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_ptrm_check_for_SCAT > ymax check_2 = y_ptrm_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_ptrm_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the tail checks data points are in the 'box' if len(x_tail_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_tail_check_for_SCAT * slop1 ymax = intercept2 + x_tail_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_tail_check_for_SCAT > ymax check_2 = y_tail_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_tail_beta_box_scatter"] = True # -------------------------------------------------------------- # check if specimen_scat is PASS or FAIL: if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]: pars[scat_key] = 'f' else: pars[scat_key] = 't' return pars, 0"
"python"
"def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs): """ calculate the paleointensity magic parameters make some definitions """ if 'version' in list(kwargs.keys()) and kwargs['version'] == 3: meth_key = 'method_codes' beta_key = 'int_b_beta' temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max' dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi' # convert dataframe to list of dictionaries datablock = datablock.to_dict('records') z_key = 'int_z' drats_key = 'int_drats' drat_key = 'int_drat' md_key = 'int_md' dec_key = 'dir_dec' inc_key = 'dir_inc' mad_key = 'int_mad_free' dang_key = 'int_dang' ptrm_key = 'int_n_ptrm' theta_key = 'int_theta' gamma_key = 'int_gamma' delta_key = 'int_delta' frac_key = 'int_frac' gmax_key = 'int_gmax' scat_key = 'int_scat' else: beta_key = 'specimen_b_beta' meth_key = 'magic_method_codes' temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max' z_key = 'specimen_z' drats_key = 'specimen_drats' drat_key = 'specimen_drat' md_key = 'specimen_md' dec_key = 'specimen_dec' inc_key = 'specimen_inc' mad_key = 'specimen_int_mad' dang_key = 'specimen_dang' ptrm_key = 'specimen_int_ptrm_n' theta_key = 'specimen_theta' gamma_key = 'specimen_gamma' delta_key = 'specimen_delta' frac_key = 'specimen_frac' gmax_key = 'specimen_gmax' scat_key = 'specimen_scat' first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], [] methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", "" zptrm_check = [] first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[ 0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5] if len(araiblock) > 6: # used only for perpendicular method of paleointensity ThetaChecks = araiblock[6] # used only for perpendicular method of paleointensity DeltaChecks = araiblock[7] xi, yi, diffcum = [], [], 0 xiz, xzi, yiz, yzi = [], [], [], [] Nptrm, dmax = 0, -1e-22 # check if even zero and infield steps if len(first_Z) > len(first_I): maxe = len(first_I) - 1 else: maxe = len(first_Z) - 1 if end == 0 or end > maxe: end = maxe # get the MAD, DANG, etc. for directional data bstep = araiblock[0][start][0] estep = araiblock[0][end][0] zstart, zend = 0, len(zijdblock) for k in range(len(zijdblock)): zrec = zijdblock[k] if zrec[0] == bstep: zstart = k if zrec[0] == estep: zend = k PCA = domean(zijdblock, zstart, zend, 'DE-BFL') D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz for rec in zijdblock: D.append((rec[1], rec[2], rec[3])) Du.append((rec[1], rec[2])) if rec[4] == 1: Dzi.append((rec[1], rec[2])) # if this is ZI step else: Diz.append((rec[1], rec[2])) # if this is IZ step # calculate the vector difference sum vds = dovds(D) b_zi, b_iz = [], [] # collect data included in ZigZag calculation if end + 1 >= len(first_Z): stop = end - 1 else: stop = end for k in range(start, end + 1): for l in range(len(first_I)): irec = first_I[l] if irec[0] == first_Z[k][0]: xi.append(irec[3]) yi.append(first_Z[k][3]) pars, errcode = int_pars(xi, yi, vds) if errcode == 1: return pars, errcode # for k in range(start,end+1): for k in range(len(first_Z) - 1): for l in range(k): # only go down to 10% of NRM..... 
if old_div(first_Z[k][3], vds) > 0.1: irec = first_I[l] if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step xzi = irec[3] yzi = first_Z[k][3] xiz = first_I[l + 1][3] yiz = first_Z[k + 1][3] slope = np.arctan2((yzi - yiz), (xiz - xzi)) r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2) if r > .1 * vds: b_zi.append(slope) # suppress noise elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step xiz = irec[3] yiz = first_Z[k][3] xzi = first_I[l + 1][3] yzi = first_Z[k + 1][3] slope = np.arctan2((yiz - yzi), (xzi - xiz)) r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2) if r > .1 * vds: b_iz.append(slope) # suppress noise # ZigZag, Frat, Trat = -1, 0, 0 if len(Diz) > 2 and len(Dzi) > 2: ZigZag = 0 dizp = fisher_mean(Diz) # get Fisher stats on IZ steps dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps dup = fisher_mean(Du) # get Fisher stats on all steps # # if directions are TOO well grouped, can get false positive for ftest, so # angles must be > 3 degrees apart. # if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.: F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \ (dup['n'] - dzip['r'] - dizp['r'] ) # Watson test for common mean nf = 2. * (dup['n'] - 2.) # number of degees of freedom ftest = fcalc(2, nf) Frat = old_div(F, ftest) if Frat > 1.: ZigZag = Frat # fails zigzag on directions methcode = "SM-FTEST" # now do slopes if len(b_zi) > 2 and len(b_iz) > 2: bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev biz_m, biz_sig = gausspars(b_iz) n_zi = float(len(b_zi)) n_iz = float(len(b_iz)) b_diff = abs(bzi_m - biz_m) # difference in means # # avoid false positives - set 3 degree slope difference here too if b_diff > 3 * np.pi / 180.: nf = n_zi + n_iz - 2. # degrees of freedom svar = old_div(((n_zi - 1.) * bzi_sig**2 + (n_iz - 1.) * biz_sig**2), nf) T = old_div((b_diff), np.sqrt( svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t ttest = tcalc(nf, .05) # t-test at 95% conf. 
Trat = old_div(T, ttest) if Trat > 1 and Trat > Frat: ZigZag = Trat # fails zigzag on directions methcode = "SM-TTEST" pars[z_key] = ZigZag pars[meth_key] = methcode # do drats if len(ptrm_check) != 0: diffcum, drat_max = 0, 0 for prec in ptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 2 # don't count alteration that happens after this step if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] if abs(prec[3] - irec[3]) > drat_max: drat_max = abs(prec[3] - irec[3]) pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3]) elif len(zptrm_check) != 0: diffcum = 0 for prec in zptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 1 if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) else: pars[drats_key] = -1 pars[drat_key] = -1 # and the pTRM tails if len(ptrm_tail) != 0: for trec in ptrm_tail: step = trec[0] for irec in first_I: if irec[0] == step: break if abs(trec[3]) > dmax: dmax = abs(trec[3]) pars[md_key] = (100 * dmax / vds) else: pars[md_key] = -1 pars[min_key] = bstep pars[max_key] = estep pars[dec_key] = PCA["specimen_dec"] pars[inc_key] = PCA["specimen_inc"] pars[mad_key] = PCA["specimen_mad"] pars[dang_key] = PCA["specimen_dang"] pars[ptrm_key] = Nptrm # and the ThetaChecks if ThetaChecks != "": t = 0 for theta in ThetaChecks: if theta[0] >= bstep and theta[0] <= estep and theta[1] > t: t = theta[1] pars[theta_key] = t else: pars[theta_key] = -1 # and the DeltaChecks if DeltaChecks != "": d = 0 for delta in DeltaChecks: if delta[0] >= bstep and delta[0] <= estep and delta[1] > d: d = delta[1] pars[delta_key] else: pars[delta_key] = -1 pars[gamma_key] = -1 if GammaChecks != "": for gamma in GammaChecks: if gamma[0] <= estep: pars['specimen_gamma'] = gamma[1] # -------------------------------------------------------------- # From here added By Ron Shaar 11-Dec 2012 # New parameters defined in Shaar and Tauxe (2012): # FRAC (specimen_frac) - ranges from 0. to 1. # SCAT (specimen_scat) - takes 1/0 # gap_max (specimen_gmax) - ranges from 0. to 1. # -------------------------------------------------------------- # -------------------------------------------------------------- # FRAC is similar to Fvds, but the numerator is the vds fraction: # FRAC= [ vds (start,end)] / total vds ] # gap_max= max [ (vector difference) / vds (start,end)] # -------------------------------------------------------------- # collect all zijderveld data to arrays and calculate VDS z_temperatures = [row[0] for row in zijdblock] zdata = [] # array of zero-fields measurements in Cartezian coordinates # array of vector differences (for vds calculation) vector_diffs = [] NRM = zijdblock[0][3] # NRM for k in range(len(zijdblock)): DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)] cart = dir2cart(DIR) zdata.append(np.array([cart[0], cart[1], cart[2]])) if k > 0: vector_diffs.append( np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2))) # last vector difference: from the last point to the origin. 
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2))) vds = sum(vector_diffs) # vds calculation zdata = np.array(zdata) vector_diffs = np.array(vector_diffs) # calculate the vds within the chosen segment vector_diffs_segment = vector_diffs[zstart:zend] # FRAC calculation FRAC = old_div(sum(vector_diffs_segment), vds) pars[frac_key] = FRAC # gap_max calculation max_FRAC_gap = max( old_div(vector_diffs_segment, sum(vector_diffs_segment))) pars[gmax_key] = max_FRAC_gap # --------------------------------------------------------------------- # Calculate the "scat box" # all data-points, pTRM checks, and tail-checks, should be inside a "scat box" # --------------------------------------------------------------------- # intialization # fail scat due to arai plot data points pars["fail_arai_beta_box_scatter"] = False pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks pars[scat_key] = "t" # Pass by default # -------------------------------------------------------------- # collect all Arai plot data points in arrays x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], [] NRMs = araiblock[0] PTRMs = araiblock[1] ptrm_checks = araiblock[2] ptrm_tail = araiblock[3] PTRMs_temperatures = [row[0] for row in PTRMs] NRMs_temperatures = [row[0] for row in NRMs] NRM = NRMs[0][3] for k in range(len(NRMs)): index_pTRMs = PTRMs_temperatures.index(NRMs[k][0]) x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM)) y_Arai.append(old_div(NRMs[k][3], NRM)) t_Arai.append(NRMs[k][0]) if NRMs[k][4] == 1: steps_Arai.append('ZI') else: steps_Arai.append('IZ') x_Arai = np.array(x_Arai) y_Arai = np.array(y_Arai) # -------------------------------------------------------------- # collect all pTRM check to arrays x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], [] x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_checks)): if ptrm_checks[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_ptrm_check_starting_point.append(x_Arai[index]) y_ptrm_check_starting_point.append(y_Arai[index]) ptrm_checks_starting_temperatures.append( starting_temperature) index_zerofield = zerofield_temperatures.index( ptrm_checks[k][0]) x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM)) y_ptrm_check.append( old_div(zerofields[index_zerofield][3], NRM)) ptrm_checks_temperatures.append(ptrm_checks[k][0]) break except: pass x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point) y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point) ptrm_checks_starting_temperatures = np.array( ptrm_checks_starting_temperatures) x_ptrm_check = np.array(x_ptrm_check) y_ptrm_check = np.array(y_ptrm_check) ptrm_checks_temperatures = np.array(ptrm_checks_temperatures) # -------------------------------------------------------------- # collect tail checks to arrays x_tail_check, y_tail_check, tail_check_temperatures = [], [], [] x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_tail)): if ptrm_tail[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if 
"LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_tail_check_starting_point.append(x_Arai[index]) y_tail_check_starting_point.append(y_Arai[index]) tail_checks_starting_temperatures.append( starting_temperature) index_infield = infield_temperatures.index( ptrm_tail[k][0]) x_tail_check.append( old_div(infields[index_infield][3], NRM)) y_tail_check.append( old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM)) tail_check_temperatures.append(ptrm_tail[k][0]) break except: pass x_tail_check = np.array(x_tail_check) y_tail_check = np.array(y_tail_check) tail_check_temperatures = np.array(tail_check_temperatures) x_tail_check_starting_point = np.array(x_tail_check_starting_point) y_tail_check_starting_point = np.array(y_tail_check_starting_point) tail_checks_starting_temperatures = np.array( tail_checks_starting_temperatures) # -------------------------------------------------------------- # collect the chosen segment in the Arai plot to arrays x_Arai_segment = x_Arai[start:end + 1] # chosen segent in the Arai plot y_Arai_segment = y_Arai[start:end + 1] # chosen segent in the Arai plot # -------------------------------------------------------------- # collect pTRM checks in segment to arrays # notice, this is different than the conventional DRATS. # for scat calculation we take only the pTRM checks which were carried out # before reaching the highest temperature in the chosen segment x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], [] for k in range(len(ptrm_checks_temperatures)): if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures <= pars[max_key]: x_ptrm_check_for_SCAT.append(x_ptrm_check[k]) y_ptrm_check_for_SCAT.append(y_ptrm_check[k]) x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT) y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT) # -------------------------------------------------------------- # collect Tail checks in segment to arrays # for scat calculation we take only the tail checks which were carried out # before reaching the highest temperature in the chosen segment x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], [] for k in range(len(tail_check_temperatures)): if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]: x_tail_check_for_SCAT.append(x_tail_check[k]) y_tail_check_for_SCAT.append(y_tail_check[k]) x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT) y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT) # -------------------------------------------------------------- # calculate the lines that define the scat box: # if threshold value for beta is not defined, then scat cannot be calculated (pass) # in this case, scat pass if beta_key in list(accept.keys()) and accept[beta_key] != "": b_beta_threshold = float(accept[beta_key]) b = pars[b_key] # best fit line cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass a = cm_y - b * cm_x # lines with slope = slope +/- 2*(specimen_b_beta) two_sigma_beta_threshold = 2 * b_beta_threshold two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b) # a line with a shallower slope (b + 2*beta*b) passing through the center of mass # y=a1+b1x b1 = b + two_sigma_slope_threshold a1 = cm_y - b1 * cm_x # bounding line with steeper slope (b - 2*beta*b) passing through the center of mass # y=a2+b2x b2 = b - 
two_sigma_slope_threshold a2 = cm_y - b2 * cm_x # lower bounding line of the 'beta box' # y=intercept1+slop1x slop1 = old_div(a1, ((old_div(a2, b2)))) intercept1 = a1 # higher bounding line of the 'beta box' # y=intercept2+slop2x slop2 = old_div(a2, ((old_div(a1, b1)))) intercept2 = a2 pars['specimen_scat_bounding_line_high'] = [intercept2, slop2] pars['specimen_scat_bounding_line_low'] = [intercept1, slop1] # -------------------------------------------------------------- # check if the Arai data points are in the 'box' # the two bounding lines ymin = intercept1 + x_Arai_segment * slop1 ymax = intercept2 + x_Arai_segment * slop2 # arrays of "True" or "False" check_1 = y_Arai_segment > ymax check_2 = y_Arai_segment < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_arai_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the pTRM checks data points are in the 'box' if len(x_ptrm_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_ptrm_check_for_SCAT * slop1 ymax = intercept2 + x_ptrm_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_ptrm_check_for_SCAT > ymax check_2 = y_ptrm_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_ptrm_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the tail checks data points are in the 'box' if len(x_tail_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_tail_check_for_SCAT * slop1 ymax = intercept2 + x_tail_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_tail_check_for_SCAT > ymax check_2 = y_tail_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_tail_beta_box_scatter"] = True # -------------------------------------------------------------- # check if specimen_scat is PASS or FAIL: if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]: pars[scat_key] = 'f' else: pars[scat_key] = 't' return pars, 0"
"['def', 'PintPars', '(', 'datablock', ',', 'araiblock', ',', 'zijdblock', ',', 'start', ',', 'end', ',', 'accept', ',', '*', '*', 'kwargs', ')', ':', 'if', "'version'", 'in', 'list', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'and', 'kwargs', '[', "'version'", ']', '==', '3', ':', 'meth_key', '=', "'method_codes'", 'beta_key', '=', "'int_b_beta'", 'temp_key', ',', 'min_key', ',', 'max_key', '=', "'treat_temp'", ',', "'meas_step_min'", ',', "'meas_step_max'", 'dc_theta_key', ',', 'dc_phi_key', '=', "'treat_dc_field_theta'", ',', "'treat_dc_field_phi'", '# convert dataframe to list of dictionaries', 'datablock', '=', 'datablock', '.', 'to_dict', '(', "'records'", ')', 'z_key', '=', "'int_z'", 'drats_key', '=', "'int_drats'", 'drat_key', '=', "'int_drat'", 'md_key', '=', "'int_md'", 'dec_key', '=', "'dir_dec'", 'inc_key', '=', "'dir_inc'", 'mad_key', '=', "'int_mad_free'", 'dang_key', '=', "'int_dang'", 'ptrm_key', '=', "'int_n_ptrm'", 'theta_key', '=', "'int_theta'", 'gamma_key', '=', "'int_gamma'", 'delta_key', '=', "'int_delta'", 'frac_key', '=', "'int_frac'", 'gmax_key', '=', "'int_gmax'", 'scat_key', '=', "'int_scat'", 'else', ':', 'beta_key', '=', "'specimen_b_beta'", 'meth_key', '=', "'magic_method_codes'", 'temp_key', ',', 'min_key', ',', 'max_key', '=', "'treatment_temp'", ',', "'measurement_step_min'", ',', "'measurement_step_max'", 'z_key', '=', "'specimen_z'", 'drats_key', '=', "'specimen_drats'", 'drat_key', '=', "'specimen_drat'", 'md_key', '=', "'specimen_md'", 'dec_key', '=', "'specimen_dec'", 'inc_key', '=', "'specimen_inc'", 'mad_key', '=', "'specimen_int_mad'", 'dang_key', '=', "'specimen_dang'", 'ptrm_key', '=', "'specimen_int_ptrm_n'", 'theta_key', '=', "'specimen_theta'", 'gamma_key', '=', "'specimen_gamma'", 'delta_key', '=', "'specimen_delta'", 'frac_key', '=', "'specimen_frac'", 'gmax_key', '=', "'specimen_gmax'", 'scat_key', '=', "'specimen_scat'", 'first_Z', ',', 'first_I', ',', 'zptrm_check', ',', 'ptrm_check', ',', 'ptrm_tail', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'methcode', ',', 'ThetaChecks', ',', 'DeltaChecks', ',', 'GammaChecks', '=', '""', ',', '""', ',', '""', ',', '""', 'zptrm_check', '=', '[', ']', 'first_Z', ',', 'first_I', ',', 'ptrm_check', ',', 'ptrm_tail', ',', 'zptrm_check', ',', 'GammaChecks', '=', 'araiblock', '[', '0', ']', ',', 'araiblock', '[', '1', ']', ',', 'araiblock', '[', '2', ']', ',', 'araiblock', '[', '3', ']', ',', 'araiblock', '[', '4', ']', ',', 'araiblock', '[', '5', ']', 'if', 'len', '(', 'araiblock', ')', '>', '6', ':', '# used only for perpendicular method of paleointensity', 'ThetaChecks', '=', 'araiblock', '[', '6', ']', '# used only for perpendicular method of paleointensity', 'DeltaChecks', '=', 'araiblock', '[', '7', ']', 'xi', ',', 'yi', ',', 'diffcum', '=', '[', ']', ',', '[', ']', ',', '0', 'xiz', ',', 'xzi', ',', 'yiz', ',', 'yzi', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'Nptrm', ',', 'dmax', '=', '0', ',', '-', '1e-22', '# check if even zero and infield steps', 'if', 'len', '(', 'first_Z', ')', '>', 'len', '(', 'first_I', ')', ':', 'maxe', '=', 'len', '(', 'first_I', ')', '-', '1', 'else', ':', 'maxe', '=', 'len', '(', 'first_Z', ')', '-', '1', 'if', 'end', '==', '0', 'or', 'end', '>', 'maxe', ':', 'end', '=', 'maxe', '# get the MAD, DANG, etc. 
for directional data', 'bstep', '=', 'araiblock', '[', '0', ']', '[', 'start', ']', '[', '0', ']', 'estep', '=', 'araiblock', '[', '0', ']', '[', 'end', ']', '[', '0', ']', 'zstart', ',', 'zend', '=', '0', ',', 'len', '(', 'zijdblock', ')', 'for', 'k', 'in', 'range', '(', 'len', '(', 'zijdblock', ')', ')', ':', 'zrec', '=', 'zijdblock', '[', 'k', ']', 'if', 'zrec', '[', '0', ']', '==', 'bstep', ':', 'zstart', '=', 'k', 'if', 'zrec', '[', '0', ']', '==', 'estep', ':', 'zend', '=', 'k', 'PCA', '=', 'domean', '(', 'zijdblock', ',', 'zstart', ',', 'zend', ',', "'DE-BFL'", ')', 'D', ',', 'Diz', ',', 'Dzi', ',', 'Du', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', '# list of NRM vectors, and separated by zi and iz', 'for', 'rec', 'in', 'zijdblock', ':', 'D', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ',', 'rec', '[', '3', ']', ')', ')', 'Du', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', 'if', 'rec', '[', '4', ']', '==', '1', ':', 'Dzi', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', '# if this is ZI step', 'else', ':', 'Diz', '.', 'append', '(', '(', 'rec', '[', '1', ']', ',', 'rec', '[', '2', ']', ')', ')', '# if this is IZ step', '# calculate the vector difference sum', 'vds', '=', 'dovds', '(', 'D', ')', 'b_zi', ',', 'b_iz', '=', '[', ']', ',', '[', ']', '# collect data included in ZigZag calculation', 'if', 'end', '+', '1', '>=', 'len', '(', 'first_Z', ')', ':', 'stop', '=', 'end', '-', '1', 'else', ':', 'stop', '=', 'end', 'for', 'k', 'in', 'range', '(', 'start', ',', 'end', '+', '1', ')', ':', 'for', 'l', 'in', 'range', '(', 'len', '(', 'first_I', ')', ')', ':', 'irec', '=', 'first_I', '[', 'l', ']', 'if', 'irec', '[', '0', ']', '==', 'first_Z', '[', 'k', ']', '[', '0', ']', ':', 'xi', '.', 'append', '(', 'irec', '[', '3', ']', ')', 'yi', '.', 'append', '(', 'first_Z', '[', 'k', ']', '[', '3', ']', ')', 'pars', ',', 'errcode', '=', 'int_pars', '(', 'xi', ',', 'yi', ',', 'vds', ')', 'if', 'errcode', '==', '1', ':', 'return', 'pars', ',', 'errcode', '# for k in range(start,end+1):', 'for', 'k', 'in', 'range', '(', 'len', '(', 'first_Z', ')', '-', '1', ')', ':', 'for', 'l', 'in', 'range', '(', 'k', ')', ':', '# only go down to 10% of NRM.....', 'if', 'old_div', '(', 'first_Z', '[', 'k', ']', '[', '3', ']', ',', 'vds', ')', '>', '0.1', ':', 'irec', '=', 'first_I', '[', 'l', ']', 'if', 'irec', '[', '4', ']', '==', '1', 'and', 'first_I', '[', 'l', '+', '1', ']', '[', '4', ']', '==', '0', ':', '# a ZI step', 'xzi', '=', 'irec', '[', '3', ']', 'yzi', '=', 'first_Z', '[', 'k', ']', '[', '3', ']', 'xiz', '=', 'first_I', '[', 'l', '+', '1', ']', '[', '3', ']', 'yiz', '=', 'first_Z', '[', 'k', '+', '1', ']', '[', '3', ']', 'slope', '=', 'np', '.', 'arctan2', '(', '(', 'yzi', '-', 'yiz', ')', ',', '(', 'xiz', '-', 'xzi', ')', ')', 'r', '=', 'np', '.', 'sqrt', '(', '(', 'yzi', '-', 'yiz', ')', '**', '2', '+', '(', 'xiz', '-', 'xzi', ')', '**', '2', ')', 'if', 'r', '>', '.1', '*', 'vds', ':', 'b_zi', '.', 'append', '(', 'slope', ')', '# suppress noise', 'elif', 'irec', '[', '4', ']', '==', '0', 'and', 'first_I', '[', 'l', '+', '1', ']', '[', '4', ']', '==', '1', ':', '# an IZ step', 'xiz', '=', 'irec', '[', '3', ']', 'yiz', '=', 'first_Z', '[', 'k', ']', '[', '3', ']', 'xzi', '=', 'first_I', '[', 'l', '+', '1', ']', '[', '3', ']', 'yzi', '=', 'first_Z', '[', 'k', '+', '1', ']', '[', '3', ']', 'slope', '=', 'np', '.', 'arctan2', '(', '(', 'yiz', '-', 'yzi', ')', ',', '(', 'xzi', '-', 
'xiz', ')', ')', 'r', '=', 'np', '.', 'sqrt', '(', '(', 'yiz', '-', 'yzi', ')', '**', '2', '+', '(', 'xzi', '-', 'xiz', ')', '**', '2', ')', 'if', 'r', '>', '.1', '*', 'vds', ':', 'b_iz', '.', 'append', '(', 'slope', ')', '# suppress noise', '#', 'ZigZag', ',', 'Frat', ',', 'Trat', '=', '-', '1', ',', '0', ',', '0', 'if', 'len', '(', 'Diz', ')', '>', '2', 'and', 'len', '(', 'Dzi', ')', '>', '2', ':', 'ZigZag', '=', '0', 'dizp', '=', 'fisher_mean', '(', 'Diz', ')', '# get Fisher stats on IZ steps', 'dzip', '=', 'fisher_mean', '(', 'Dzi', ')', '# get Fisher stats on ZI steps', 'dup', '=', 'fisher_mean', '(', 'Du', ')', '# get Fisher stats on all steps', '#', '# if directions are TOO well grouped, can get false positive for ftest, so', '# angles must be > 3 degrees apart.', '#', 'if', 'angle', '(', '[', 'dizp', '[', "'dec'", ']', ',', 'dizp', '[', "'inc'", ']', ']', ',', '[', 'dzip', '[', "'dec'", ']', ',', 'dzip', '[', "'inc'", ']', ']', ')', '>', '3.', ':', 'F', '=', '(', 'dup', '[', "'n'", ']', '-', '2.', ')', '*', '(', 'dzip', '[', "'r'", ']', '+', 'dizp', '[', "'r'", ']', '-', 'dup', '[', "'r'", ']', ')', '/', '(', 'dup', '[', "'n'", ']', '-', 'dzip', '[', "'r'", ']', '-', 'dizp', '[', "'r'", ']', ')', '# Watson test for common mean', 'nf', '=', '2.', '*', '(', 'dup', '[', "'n'", ']', '-', '2.', ')', '# number of degees of freedom', 'ftest', '=', 'fcalc', '(', '2', ',', 'nf', ')', 'Frat', '=', 'old_div', '(', 'F', ',', 'ftest', ')', 'if', 'Frat', '>', '1.', ':', 'ZigZag', '=', 'Frat', '# fails zigzag on directions', 'methcode', '=', '"SM-FTEST"', '# now do slopes', 'if', 'len', '(', 'b_zi', ')', '>', '2', 'and', 'len', '(', 'b_iz', ')', '>', '2', ':', 'bzi_m', ',', 'bzi_sig', '=', 'gausspars', '(', 'b_zi', ')', '# mean, std dev', 'biz_m', ',', 'biz_sig', '=', 'gausspars', '(', 'b_iz', ')', 'n_zi', '=', 'float', '(', 'len', '(', 'b_zi', ')', ')', 'n_iz', '=', 'float', '(', 'len', '(', 'b_iz', ')', ')', 'b_diff', '=', 'abs', '(', 'bzi_m', '-', 'biz_m', ')', '# difference in means', '#', '# avoid false positives - set 3 degree slope difference here too', 'if', 'b_diff', '>', '3', '*', 'np', '.', 'pi', '/', '180.', ':', 'nf', '=', 'n_zi', '+', 'n_iz', '-', '2.', '# degrees of freedom', 'svar', '=', 'old_div', '(', '(', '(', 'n_zi', '-', '1.', ')', '*', 'bzi_sig', '**', '2', '+', '(', 'n_iz', '-', '1.', ')', '*', 'biz_sig', '**', '2', ')', ',', 'nf', ')', 'T', '=', 'old_div', '(', '(', 'b_diff', ')', ',', 'np', '.', 'sqrt', '(', 'svar', '*', '(', 'old_div', '(', '1.0', ',', 'n_zi', ')', '+', 'old_div', '(', '1.0', ',', 'n_iz', ')', ')', ')', ')', "# student's t", 'ttest', '=', 'tcalc', '(', 'nf', ',', '.05', ')', '# t-test at 95% conf.', 'Trat', '=', 'old_div', '(', 'T', ',', 'ttest', ')', 'if', 'Trat', '>', '1', 'and', 'Trat', '>', 'Frat', ':', 'ZigZag', '=', 'Trat', '# fails zigzag on directions', 'methcode', '=', '"SM-TTEST"', 'pars', '[', 'z_key', ']', '=', 'ZigZag', 'pars', '[', 'meth_key', ']', '=', 'methcode', '# do drats', 'if', 'len', '(', 'ptrm_check', ')', '!=', '0', ':', 'diffcum', ',', 'drat_max', '=', '0', ',', '0', 'for', 'prec', 'in', 'ptrm_check', ':', 'step', '=', 'prec', '[', '0', ']', 'endbak', '=', 'end', 'zend', '=', 'end', 'while', 'zend', '>', 'len', '(', 'zijdblock', ')', '-', '1', ':', 'zend', '=', 'zend', '-', '2', "# don't count alteration that happens after this step", 'if', 'step', '<', 'zijdblock', '[', 'zend', ']', '[', '0', ']', ':', 'Nptrm', '+=', '1', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'diffcum', 
'+=', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', 'if', 'abs', '(', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', ')', '>', 'drat_max', ':', 'drat_max', '=', 'abs', '(', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', ')', 'pars', '[', 'drats_key', ']', '=', '(', '100', '*', 'abs', '(', 'diffcum', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'pars', '[', 'drat_key', ']', '=', '(', '100', '*', 'abs', '(', 'drat_max', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'elif', 'len', '(', 'zptrm_check', ')', '!=', '0', ':', 'diffcum', '=', '0', 'for', 'prec', 'in', 'zptrm_check', ':', 'step', '=', 'prec', '[', '0', ']', 'endbak', '=', 'end', 'zend', '=', 'end', 'while', 'zend', '>', 'len', '(', 'zijdblock', ')', '-', '1', ':', 'zend', '=', 'zend', '-', '1', 'if', 'step', '<', 'zijdblock', '[', 'zend', ']', '[', '0', ']', ':', 'Nptrm', '+=', '1', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'diffcum', '+=', 'prec', '[', '3', ']', '-', 'irec', '[', '3', ']', 'pars', '[', 'drats_key', ']', '=', '(', '100', '*', 'abs', '(', 'diffcum', ')', '/', 'first_I', '[', 'zend', ']', '[', '3', ']', ')', 'else', ':', 'pars', '[', 'drats_key', ']', '=', '-', '1', 'pars', '[', 'drat_key', ']', '=', '-', '1', '# and the pTRM tails', 'if', 'len', '(', 'ptrm_tail', ')', '!=', '0', ':', 'for', 'trec', 'in', 'ptrm_tail', ':', 'step', '=', 'trec', '[', '0', ']', 'for', 'irec', 'in', 'first_I', ':', 'if', 'irec', '[', '0', ']', '==', 'step', ':', 'break', 'if', 'abs', '(', 'trec', '[', '3', ']', ')', '>', 'dmax', ':', 'dmax', '=', 'abs', '(', 'trec', '[', '3', ']', ')', 'pars', '[', 'md_key', ']', '=', '(', '100', '*', 'dmax', '/', 'vds', ')', 'else', ':', 'pars', '[', 'md_key', ']', '=', '-', '1', 'pars', '[', 'min_key', ']', '=', 'bstep', 'pars', '[', 'max_key', ']', '=', 'estep', 'pars', '[', 'dec_key', ']', '=', 'PCA', '[', '"specimen_dec"', ']', 'pars', '[', 'inc_key', ']', '=', 'PCA', '[', '"specimen_inc"', ']', 'pars', '[', 'mad_key', ']', '=', 'PCA', '[', '"specimen_mad"', ']', 'pars', '[', 'dang_key', ']', '=', 'PCA', '[', '"specimen_dang"', ']', 'pars', '[', 'ptrm_key', ']', '=', 'Nptrm', '# and the ThetaChecks', 'if', 'ThetaChecks', '!=', '""', ':', 't', '=', '0', 'for', 'theta', 'in', 'ThetaChecks', ':', 'if', 'theta', '[', '0', ']', '>=', 'bstep', 'and', 'theta', '[', '0', ']', '<=', 'estep', 'and', 'theta', '[', '1', ']', '>', 't', ':', 't', '=', 'theta', '[', '1', ']', 'pars', '[', 'theta_key', ']', '=', 't', 'else', ':', 'pars', '[', 'theta_key', ']', '=', '-', '1', '# and the DeltaChecks', 'if', 'DeltaChecks', '!=', '""', ':', 'd', '=', '0', 'for', 'delta', 'in', 'DeltaChecks', ':', 'if', 'delta', '[', '0', ']', '>=', 'bstep', 'and', 'delta', '[', '0', ']', '<=', 'estep', 'and', 'delta', '[', '1', ']', '>', 'd', ':', 'd', '=', 'delta', '[', '1', ']', 'pars', '[', 'delta_key', ']', 'else', ':', 'pars', '[', 'delta_key', ']', '=', '-', '1', 'pars', '[', 'gamma_key', ']', '=', '-', '1', 'if', 'GammaChecks', '!=', '""', ':', 'for', 'gamma', 'in', 'GammaChecks', ':', 'if', 'gamma', '[', '0', ']', '<=', 'estep', ':', 'pars', '[', "'specimen_gamma'", ']', '=', 'gamma', '[', '1', ']', '# --------------------------------------------------------------', '# From here added By Ron Shaar 11-Dec 2012', '# New parameters defined in Shaar and Tauxe (2012):', '# FRAC (specimen_frac) - ranges from 0. to 1.', '# SCAT (specimen_scat) - takes 1/0', '# gap_max (specimen_gmax) - ranges from 0. 
to 1.', '# --------------------------------------------------------------', '# --------------------------------------------------------------', '# FRAC is similar to Fvds, but the numerator is the vds fraction:', '# FRAC= [ vds (start,end)] / total vds ]', '# gap_max= max [ (vector difference) / vds (start,end)]', '# --------------------------------------------------------------', '# collect all zijderveld data to arrays and calculate VDS', 'z_temperatures', '=', '[', 'row', '[', '0', ']', 'for', 'row', 'in', 'zijdblock', ']', 'zdata', '=', '[', ']', '# array of zero-fields measurements in Cartezian coordinates', '# array of vector differences (for vds calculation)', 'vector_diffs', '=', '[', ']', 'NRM', '=', 'zijdblock', '[', '0', ']', '[', '3', ']', '# NRM', 'for', 'k', 'in', 'range', '(', 'len', '(', 'zijdblock', ')', ')', ':', 'DIR', '=', '[', 'zijdblock', '[', 'k', ']', '[', '1', ']', ',', 'zijdblock', '[', 'k', ']', '[', '2', ']', ',', 'old_div', '(', 'zijdblock', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ']', 'cart', '=', 'dir2cart', '(', 'DIR', ')', 'zdata', '.', 'append', '(', 'np', '.', 'array', '(', '[', 'cart', '[', '0', ']', ',', 'cart', '[', '1', ']', ',', 'cart', '[', '2', ']', ']', ')', ')', 'if', 'k', '>', '0', ':', 'vector_diffs', '.', 'append', '(', 'np', '.', 'sqrt', '(', 'sum', '(', '(', 'np', '.', 'array', '(', 'zdata', '[', '-', '2', ']', ')', '-', 'np', '.', 'array', '(', 'zdata', '[', '-', '1', ']', ')', ')', '**', '2', ')', ')', ')', '# last vector difference: from the last point to the origin.', 'vector_diffs', '.', 'append', '(', 'np', '.', 'sqrt', '(', 'sum', '(', 'np', '.', 'array', '(', 'zdata', '[', '-', '1', ']', ')', '**', '2', ')', ')', ')', 'vds', '=', 'sum', '(', 'vector_diffs', ')', '# vds calculation', 'zdata', '=', 'np', '.', 'array', '(', 'zdata', ')', 'vector_diffs', '=', 'np', '.', 'array', '(', 'vector_diffs', ')', '# calculate the vds within the chosen segment', 'vector_diffs_segment', '=', 'vector_diffs', '[', 'zstart', ':', 'zend', ']', '# FRAC calculation', 'FRAC', '=', 'old_div', '(', 'sum', '(', 'vector_diffs_segment', ')', ',', 'vds', ')', 'pars', '[', 'frac_key', ']', '=', 'FRAC', '# gap_max calculation', 'max_FRAC_gap', '=', 'max', '(', 'old_div', '(', 'vector_diffs_segment', ',', 'sum', '(', 'vector_diffs_segment', ')', ')', ')', 'pars', '[', 'gmax_key', ']', '=', 'max_FRAC_gap', '# ---------------------------------------------------------------------', '# Calculate the "scat box"', '# all data-points, pTRM checks, and tail-checks, should be inside a "scat box"', '# ---------------------------------------------------------------------', '# intialization', '# fail scat due to arai plot data points', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', '=', 'False', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', '=', 'False', '# fail scat due to pTRM checks', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', '=', 'False', '# fail scat due to tail checks', 'pars', '[', 'scat_key', ']', '=', '"t"', '# Pass by default', '# --------------------------------------------------------------', '# collect all Arai plot data points in arrays', 'x_Arai', ',', 'y_Arai', ',', 't_Arai', ',', 'steps_Arai', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'NRMs', '=', 'araiblock', '[', '0', ']', 'PTRMs', '=', 'araiblock', '[', '1', ']', 'ptrm_checks', '=', 'araiblock', '[', '2', ']', 'ptrm_tail', '=', 'araiblock', '[', '3', ']', 'PTRMs_temperatures', '=', '[', 'row', '[', '0', ']', 'for', 'row', 'in', 'PTRMs', ']', 'NRMs_temperatures', '=', 
'[', 'row', '[', '0', ']', 'for', 'row', 'in', 'NRMs', ']', 'NRM', '=', 'NRMs', '[', '0', ']', '[', '3', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'NRMs', ')', ')', ':', 'index_pTRMs', '=', 'PTRMs_temperatures', '.', 'index', '(', 'NRMs', '[', 'k', ']', '[', '0', ']', ')', 'x_Arai', '.', 'append', '(', 'old_div', '(', 'PTRMs', '[', 'index_pTRMs', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_Arai', '.', 'append', '(', 'old_div', '(', 'NRMs', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ')', 't_Arai', '.', 'append', '(', 'NRMs', '[', 'k', ']', '[', '0', ']', ')', 'if', 'NRMs', '[', 'k', ']', '[', '4', ']', '==', '1', ':', 'steps_Arai', '.', 'append', '(', "'ZI'", ')', 'else', ':', 'steps_Arai', '.', 'append', '(', "'IZ'", ')', 'x_Arai', '=', 'np', '.', 'array', '(', 'x_Arai', ')', 'y_Arai', '=', 'np', '.', 'array', '(', 'y_Arai', ')', '# --------------------------------------------------------------', '# collect all pTRM check to arrays', 'x_ptrm_check', ',', 'y_ptrm_check', ',', 'ptrm_checks_temperatures', ',', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'x_ptrm_check_starting_point', ',', 'y_ptrm_check_starting_point', ',', 'ptrm_checks_starting_temperatures', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_checks', ')', ')', ':', 'if', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', 'in', 'NRMs_temperatures', ':', '# find the starting point of the pTRM check:', 'for', 'i', 'in', 'range', '(', 'len', '(', 'datablock', ')', ')', ':', 'rec', '=', 'datablock', '[', 'i', ']', 'if', '"LT-PTRM-I"', 'in', 'rec', '[', 'meth_key', ']', 'and', 'float', '(', 'rec', '[', 'temp_key', ']', ')', '==', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ':', 'starting_temperature', '=', '(', 'float', '(', 'datablock', '[', 'i', '-', '1', ']', '[', 'temp_key', ']', ')', ')', 'try', ':', 'index', '=', 't_Arai', '.', 'index', '(', 'starting_temperature', ')', 'x_ptrm_check_starting_point', '.', 'append', '(', 'x_Arai', '[', 'index', ']', ')', 'y_ptrm_check_starting_point', '.', 'append', '(', 'y_Arai', '[', 'index', ']', ')', 'ptrm_checks_starting_temperatures', '.', 'append', '(', 'starting_temperature', ')', 'index_zerofield', '=', 'zerofield_temperatures', '.', 'index', '(', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ')', 'x_ptrm_check', '.', 'append', '(', 'old_div', '(', 'ptrm_checks', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_ptrm_check', '.', 'append', '(', 'old_div', '(', 'zerofields', '[', 'index_zerofield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'ptrm_checks_temperatures', '.', 'append', '(', 'ptrm_checks', '[', 'k', ']', '[', '0', ']', ')', 'break', 'except', ':', 'pass', 'x_ptrm_check_starting_point', '=', 'np', '.', 'array', '(', 'x_ptrm_check_starting_point', ')', 'y_ptrm_check_starting_point', '=', 'np', '.', 'array', '(', 'y_ptrm_check_starting_point', ')', 'ptrm_checks_starting_temperatures', '=', 'np', '.', 'array', '(', 'ptrm_checks_starting_temperatures', ')', 'x_ptrm_check', '=', 'np', '.', 'array', '(', 'x_ptrm_check', ')', 'y_ptrm_check', '=', 'np', '.', 'array', '(', 'y_ptrm_check', ')', 'ptrm_checks_temperatures', '=', 'np', '.', 'array', '(', 'ptrm_checks_temperatures', ')', '# --------------------------------------------------------------', '# collect tail checks to arrays', 'x_tail_check', ',', 'y_tail_check', ',', 'tail_check_temperatures', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'x_tail_check_starting_point', ',', 'y_tail_check_starting_point', ',', 'tail_checks_starting_temperatures', '=', '[', ']', ',', '[', 
']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_tail', ')', ')', ':', 'if', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', 'in', 'NRMs_temperatures', ':', '# find the starting point of the pTRM check:', 'for', 'i', 'in', 'range', '(', 'len', '(', 'datablock', ')', ')', ':', 'rec', '=', 'datablock', '[', 'i', ']', 'if', '"LT-PTRM-MD"', 'in', 'rec', '[', 'meth_key', ']', 'and', 'float', '(', 'rec', '[', 'temp_key', ']', ')', '==', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ':', 'starting_temperature', '=', '(', 'float', '(', 'datablock', '[', 'i', '-', '1', ']', '[', 'temp_key', ']', ')', ')', 'try', ':', 'index', '=', 't_Arai', '.', 'index', '(', 'starting_temperature', ')', 'x_tail_check_starting_point', '.', 'append', '(', 'x_Arai', '[', 'index', ']', ')', 'y_tail_check_starting_point', '.', 'append', '(', 'y_Arai', '[', 'index', ']', ')', 'tail_checks_starting_temperatures', '.', 'append', '(', 'starting_temperature', ')', 'index_infield', '=', 'infield_temperatures', '.', 'index', '(', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ')', 'x_tail_check', '.', 'append', '(', 'old_div', '(', 'infields', '[', 'index_infield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'y_tail_check', '.', 'append', '(', 'old_div', '(', 'ptrm_tail', '[', 'k', ']', '[', '3', ']', ',', 'NRM', ')', '+', 'old_div', '(', 'zerofields', '[', 'index_infield', ']', '[', '3', ']', ',', 'NRM', ')', ')', 'tail_check_temperatures', '.', 'append', '(', 'ptrm_tail', '[', 'k', ']', '[', '0', ']', ')', 'break', 'except', ':', 'pass', 'x_tail_check', '=', 'np', '.', 'array', '(', 'x_tail_check', ')', 'y_tail_check', '=', 'np', '.', 'array', '(', 'y_tail_check', ')', 'tail_check_temperatures', '=', 'np', '.', 'array', '(', 'tail_check_temperatures', ')', 'x_tail_check_starting_point', '=', 'np', '.', 'array', '(', 'x_tail_check_starting_point', ')', 'y_tail_check_starting_point', '=', 'np', '.', 'array', '(', 'y_tail_check_starting_point', ')', 'tail_checks_starting_temperatures', '=', 'np', '.', 'array', '(', 'tail_checks_starting_temperatures', ')', '# --------------------------------------------------------------', '# collect the chosen segment in the Arai plot to arrays', 'x_Arai_segment', '=', 'x_Arai', '[', 'start', ':', 'end', '+', '1', ']', '# chosen segent in the Arai plot', 'y_Arai_segment', '=', 'y_Arai', '[', 'start', ':', 'end', '+', '1', ']', '# chosen segent in the Arai plot', '# --------------------------------------------------------------', '# collect pTRM checks in segment to arrays', '# notice, this is different than the conventional DRATS.', '# for scat calculation we take only the pTRM checks which were carried out', '# before reaching the highest temperature in the chosen segment', 'x_ptrm_check_for_SCAT', ',', 'y_ptrm_check_for_SCAT', '=', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'ptrm_checks_temperatures', ')', ')', ':', 'if', 'ptrm_checks_temperatures', '[', 'k', ']', '>=', 'pars', '[', 'min_key', ']', 'and', 'ptrm_checks_starting_temperatures', '<=', 'pars', '[', 'max_key', ']', ':', 'x_ptrm_check_for_SCAT', '.', 'append', '(', 'x_ptrm_check', '[', 'k', ']', ')', 'y_ptrm_check_for_SCAT', '.', 'append', '(', 'y_ptrm_check', '[', 'k', ']', ')', 'x_ptrm_check_for_SCAT', '=', 'np', '.', 'array', '(', 'x_ptrm_check_for_SCAT', ')', 'y_ptrm_check_for_SCAT', '=', 'np', '.', 'array', '(', 'y_ptrm_check_for_SCAT', ')', '# --------------------------------------------------------------', '# collect Tail checks in segment to arrays', '# for scat calculation we take only the 
tail checks which were carried out', '# before reaching the highest temperature in the chosen segment', 'x_tail_check_for_SCAT', ',', 'y_tail_check_for_SCAT', '=', '[', ']', ',', '[', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'tail_check_temperatures', ')', ')', ':', 'if', 'tail_check_temperatures', '[', 'k', ']', '>=', 'pars', '[', 'min_key', ']', 'and', 'tail_checks_starting_temperatures', '[', 'k', ']', '<=', 'pars', '[', 'max_key', ']', ':', 'x_tail_check_for_SCAT', '.', 'append', '(', 'x_tail_check', '[', 'k', ']', ')', 'y_tail_check_for_SCAT', '.', 'append', '(', 'y_tail_check', '[', 'k', ']', ')', 'x_tail_check_for_SCAT', '=', 'np', '.', 'array', '(', 'x_tail_check_for_SCAT', ')', 'y_tail_check_for_SCAT', '=', 'np', '.', 'array', '(', 'y_tail_check_for_SCAT', ')', '# --------------------------------------------------------------', '# calculate the lines that define the scat box:', '# if threshold value for beta is not defined, then scat cannot be calculated (pass)', '# in this case, scat pass', 'if', 'beta_key', 'in', 'list', '(', 'accept', '.', 'keys', '(', ')', ')', 'and', 'accept', '[', 'beta_key', ']', '!=', '""', ':', 'b_beta_threshold', '=', 'float', '(', 'accept', '[', 'beta_key', ']', ')', 'b', '=', 'pars', '[', 'b_key', ']', '# best fit line', 'cm_x', '=', 'np', '.', 'mean', '(', 'np', '.', 'array', '(', 'x_Arai_segment', ')', ')', '# x center of mass', 'cm_y', '=', 'np', '.', 'mean', '(', 'np', '.', 'array', '(', 'y_Arai_segment', ')', ')', '# y center of mass', 'a', '=', 'cm_y', '-', 'b', '*', 'cm_x', '# lines with slope = slope +/- 2*(specimen_b_beta)', 'two_sigma_beta_threshold', '=', '2', '*', 'b_beta_threshold', 'two_sigma_slope_threshold', '=', 'abs', '(', 'two_sigma_beta_threshold', '*', 'b', ')', '# a line with a shallower slope (b + 2*beta*b) passing through the center of mass', '# y=a1+b1x', 'b1', '=', 'b', '+', 'two_sigma_slope_threshold', 'a1', '=', 'cm_y', '-', 'b1', '*', 'cm_x', '# bounding line with steeper slope (b - 2*beta*b) passing through the center of mass', '# y=a2+b2x', 'b2', '=', 'b', '-', 'two_sigma_slope_threshold', 'a2', '=', 'cm_y', '-', 'b2', '*', 'cm_x', "# lower bounding line of the 'beta box'", '# y=intercept1+slop1x', 'slop1', '=', 'old_div', '(', 'a1', ',', '(', '(', 'old_div', '(', 'a2', ',', 'b2', ')', ')', ')', ')', 'intercept1', '=', 'a1', "# higher bounding line of the 'beta box'", '# y=intercept2+slop2x', 'slop2', '=', 'old_div', '(', 'a2', ',', '(', '(', 'old_div', '(', 'a1', ',', 'b1', ')', ')', ')', ')', 'intercept2', '=', 'a2', 'pars', '[', "'specimen_scat_bounding_line_high'", ']', '=', '[', 'intercept2', ',', 'slop2', ']', 'pars', '[', "'specimen_scat_bounding_line_low'", ']', '=', '[', 'intercept1', ',', 'slop1', ']', '# --------------------------------------------------------------', "# check if the Arai data points are in the 'box'", '# the two bounding lines', 'ymin', '=', 'intercept1', '+', 'x_Arai_segment', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_Arai_segment', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_Arai_segment', '>', 'ymax', 'check_2', '=', 'y_Arai_segment', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', "# check if the pTRM checks data points are in the 'box'", 'if', 'len', '(', 'x_ptrm_check_for_SCAT', ')', '>', '0', ':', '# the two bounding lines', 'ymin', 
'=', 'intercept1', '+', 'x_ptrm_check_for_SCAT', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_ptrm_check_for_SCAT', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_ptrm_check_for_SCAT', '>', 'ymax', 'check_2', '=', 'y_ptrm_check_for_SCAT', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', "# check if the tail checks data points are in the 'box'", 'if', 'len', '(', 'x_tail_check_for_SCAT', ')', '>', '0', ':', '# the two bounding lines', 'ymin', '=', 'intercept1', '+', 'x_tail_check_for_SCAT', '*', 'slop1', 'ymax', '=', 'intercept2', '+', 'x_tail_check_for_SCAT', '*', 'slop2', '# arrays of "True" or "False"', 'check_1', '=', 'y_tail_check_for_SCAT', '>', 'ymax', 'check_2', '=', 'y_tail_check_for_SCAT', '<', 'ymin', '# check if at least one "True"', 'if', '(', 'sum', '(', 'check_1', ')', '+', 'sum', '(', 'check_2', ')', ')', '>', '0', ':', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', '=', 'True', '# --------------------------------------------------------------', '# check if specimen_scat is PASS or FAIL:', 'if', 'pars', '[', '"fail_tail_beta_box_scatter"', ']', 'or', 'pars', '[', '"fail_ptrm_beta_box_scatter"', ']', 'or', 'pars', '[', '"fail_arai_beta_box_scatter"', ']', ':', 'pars', '[', 'scat_key', ']', '=', "'f'", 'else', ':', 'pars', '[', 'scat_key', ']', '=', "'t'", 'return', 'pars', ',', '0']"
"calculate the paleointensity magic parameters make some definitions"
"['calculate', 'the', 'paleointensity', 'magic', 'parameters', 'make', 'some', 'definitions']"
"train"
"https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L2827-L3374"
10
"lowandrew/OLCTools"
"spadespipeline/vtyper.py"
"Vtyper.epcrparse"
"def epcrparse(self): """ Parse the ePCR text file outputs """ logging.info('Parsing ePCR results') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': if 'stx' in sample.general.datastore: # Initialise count - this allows for the population of vtyperresults with unique values uniquecount = 0 # This populates vtyperresults with the verotoxin subtypes toxinlist = [] if os.path.isfile(sample[self.analysistype].resultsfile): epcrresults = open(sample[self.analysistype].resultsfile, 'r') for result in epcrresults: # Only the lines without a # contain results if "#" not in result: uniquecount += 1 # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Push the name of the primer pair - stripped of anything after a _ to the dictionary if vttype not in toxinlist: toxinlist.append(vttype) # Create a string of the entries in list1 joined with ";" toxinstring = ";".join(sorted(toxinlist)) # Save the string to the metadata sample[self.analysistype].toxinprofile = toxinstring else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'"
"python"
"def epcrparse(self): """ Parse the ePCR text file outputs """ logging.info('Parsing ePCR results') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': if 'stx' in sample.general.datastore: # Initialise count - this allows for the population of vtyperresults with unique values uniquecount = 0 # This populates vtyperresults with the verotoxin subtypes toxinlist = [] if os.path.isfile(sample[self.analysistype].resultsfile): epcrresults = open(sample[self.analysistype].resultsfile, 'r') for result in epcrresults: # Only the lines without a # contain results if "#" not in result: uniquecount += 1 # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Push the name of the primer pair - stripped of anything after a _ to the dictionary if vttype not in toxinlist: toxinlist.append(vttype) # Create a string of the entries in list1 joined with ";" toxinstring = ";".join(sorted(toxinlist)) # Save the string to the metadata sample[self.analysistype].toxinprofile = toxinstring else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'"
"['def', 'epcrparse', '(', 'self', ')', ':', 'logging', '.', 'info', '(', "'Parsing ePCR results'", ')', 'for', 'sample', 'in', 'self', '.', 'metadata', ':', 'if', 'sample', '.', 'general', '.', 'bestassemblyfile', '!=', "'NA'", ':', 'if', "'stx'", 'in', 'sample', '.', 'general', '.', 'datastore', ':', '# Initialise count - this allows for the population of vtyperresults with unique values', 'uniquecount', '=', '0', '# This populates vtyperresults with the verotoxin subtypes', 'toxinlist', '=', '[', ']', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'resultsfile', ')', ':', 'epcrresults', '=', 'open', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'resultsfile', ',', "'r'", ')', 'for', 'result', 'in', 'epcrresults', ':', '# Only the lines without a # contain results', 'if', '"#"', 'not', 'in', 'result', ':', 'uniquecount', '+=', '1', '# Split on \\t', 'data', '=', 'result', '.', 'split', '(', "'\\t'", ')', '# The subtyping primer pair is the first entry on lines with results', 'vttype', '=', 'data', '[', '0', ']', '.', 'split', '(', "'_'", ')', '[', '0', ']', '# Push the name of the primer pair - stripped of anything after a _ to the dictionary', 'if', 'vttype', 'not', 'in', 'toxinlist', ':', 'toxinlist', '.', 'append', '(', 'vttype', ')', '# Create a string of the entries in list1 joined with ";"', 'toxinstring', '=', '";"', '.', 'join', '(', 'sorted', '(', 'toxinlist', ')', ')', '# Save the string to the metadata', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', 'toxinstring', 'else', ':', 'setattr', '(', 'sample', ',', 'self', '.', 'analysistype', ',', 'GenObject', '(', ')', ')', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', "'NA'", 'else', ':', 'setattr', '(', 'sample', ',', 'self', '.', 'analysistype', ',', 'GenObject', '(', ')', ')', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'toxinprofile', '=', "'NA'"]"
"Parse the ePCR text file outputs"
"['Parse', 'the', 'ePCR', 'text', 'file', 'outputs']"
"train"
"https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/vtyper.py#L96-L131"
11
"atztogo/phonopy"
"phonopy/structure/spglib.py"
"get_pointgroup"
"def get_pointgroup(rotations): """Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m " """ _set_no_error() # (symbol, pointgroup_number, transformation_matrix) pointgroup = spg.pointgroup(np.array(rotations, dtype='intc', order='C')) _set_error_message() return pointgroup"
"python"
"def get_pointgroup(rotations): """Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m " """ _set_no_error() # (symbol, pointgroup_number, transformation_matrix) pointgroup = spg.pointgroup(np.array(rotations, dtype='intc', order='C')) _set_error_message() return pointgroup"
"['def', 'get_pointgroup', '(', 'rotations', ')', ':', '_set_no_error', '(', ')', '# (symbol, pointgroup_number, transformation_matrix)', 'pointgroup', '=', 'spg', '.', 'pointgroup', '(', 'np', '.', 'array', '(', 'rotations', ',', 'dtype', '=', "'intc'", ',', 'order', '=', "'C'", ')', ')', '_set_error_message', '(', ')', 'return', 'pointgroup']"
"Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m ""
"['Return', 'point', 'group', 'in', 'international', 'table', 'symbol', 'and', 'number', '.']"
"train"
"https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/spglib.py#L301-L343"
12
"automl/HpBandSter"
"hpbandster/core/nameserver.py"
"NameServer.start"
"def start(self): """ starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port """ if self.host is None: if self.nic_name is None: self.host = 'localhost' else: self.host = nic_name_to_host(self.nic_name) uri, self.pyro_ns, _ = Pyro4.naming.startNS(host=self.host, port=self.port) self.host, self.port = self.pyro_ns.locationStr.split(':') self.port = int(self.port) thread = threading.Thread(target=self.pyro_ns.requestLoop, name='Pyro4 nameserver started by HpBandSter') thread.start() if not self.dir is None: os.makedirs(self.dir, exist_ok=True) self.conf_fn = os.path.join(self.dir, 'HPB_run_%s_pyro.pkl'%self.run_id) with open(self.conf_fn, 'wb') as fh: pickle.dump((self.host, self.port), fh) return(self.host, self.port)"
"python"
"def start(self): """ starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port """ if self.host is None: if self.nic_name is None: self.host = 'localhost' else: self.host = nic_name_to_host(self.nic_name) uri, self.pyro_ns, _ = Pyro4.naming.startNS(host=self.host, port=self.port) self.host, self.port = self.pyro_ns.locationStr.split(':') self.port = int(self.port) thread = threading.Thread(target=self.pyro_ns.requestLoop, name='Pyro4 nameserver started by HpBandSter') thread.start() if not self.dir is None: os.makedirs(self.dir, exist_ok=True) self.conf_fn = os.path.join(self.dir, 'HPB_run_%s_pyro.pkl'%self.run_id) with open(self.conf_fn, 'wb') as fh: pickle.dump((self.host, self.port), fh) return(self.host, self.port)"
"['def', 'start', '(', 'self', ')', ':', 'if', 'self', '.', 'host', 'is', 'None', ':', 'if', 'self', '.', 'nic_name', 'is', 'None', ':', 'self', '.', 'host', '=', "'localhost'", 'else', ':', 'self', '.', 'host', '=', 'nic_name_to_host', '(', 'self', '.', 'nic_name', ')', 'uri', ',', 'self', '.', 'pyro_ns', ',', '_', '=', 'Pyro4', '.', 'naming', '.', 'startNS', '(', 'host', '=', 'self', '.', 'host', ',', 'port', '=', 'self', '.', 'port', ')', 'self', '.', 'host', ',', 'self', '.', 'port', '=', 'self', '.', 'pyro_ns', '.', 'locationStr', '.', 'split', '(', "':'", ')', 'self', '.', 'port', '=', 'int', '(', 'self', '.', 'port', ')', 'thread', '=', 'threading', '.', 'Thread', '(', 'target', '=', 'self', '.', 'pyro_ns', '.', 'requestLoop', ',', 'name', '=', "'Pyro4 nameserver started by HpBandSter'", ')', 'thread', '.', 'start', '(', ')', 'if', 'not', 'self', '.', 'dir', 'is', 'None', ':', 'os', '.', 'makedirs', '(', 'self', '.', 'dir', ',', 'exist_ok', '=', 'True', ')', 'self', '.', 'conf_fn', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'dir', ',', "'HPB_run_%s_pyro.pkl'", '%', 'self', '.', 'run_id', ')', 'with', 'open', '(', 'self', '.', 'conf_fn', ',', "'wb'", ')', 'as', 'fh', ':', 'pickle', '.', 'dump', '(', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', ',', 'fh', ')', 'return', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')']"
"starts a Pyro4 nameserver in a separate thread Returns ------- tuple (str, int): the host name and the used port"
"['starts', 'a', 'Pyro4', 'nameserver', 'in', 'a', 'separate', 'thread', 'Returns', '-------', 'tuple', '(', 'str', 'int', ')', ':', 'the', 'host', 'name', 'and', 'the', 'used', 'port']"
"train"
"https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/nameserver.py#L48-L79"
13
"PixelwarStudio/PyTree"
"Tree/core.py"
"Tree.grow"
"def grow(self, times=1): """Let the tree grow. Args: times (integer): Indicate how many times the tree will grow. """ self.nodes.append([]) for n, node in enumerate(self.nodes[self.age]): if self.age == 0: p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent(self.age-1, n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length(self.age+1, i) self.nodes[self.age+1].append(node.make_new_node(length, tot_angle)) self.age += 1 if times > 1: self.grow(times-1)"
"python"
"def grow(self, times=1): """Let the tree grow. Args: times (integer): Indicate how many times the tree will grow. """ self.nodes.append([]) for n, node in enumerate(self.nodes[self.age]): if self.age == 0: p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent(self.age-1, n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length(self.age+1, i) self.nodes[self.age+1].append(node.make_new_node(length, tot_angle)) self.age += 1 if times > 1: self.grow(times-1)"
"['def', 'grow', '(', 'self', ',', 'times', '=', '1', ')', ':', 'self', '.', 'nodes', '.', 'append', '(', '[', ']', ')', 'for', 'n', ',', 'node', 'in', 'enumerate', '(', 'self', '.', 'nodes', '[', 'self', '.', 'age', ']', ')', ':', 'if', 'self', '.', 'age', '==', '0', ':', 'p_node', '=', 'Node', '(', 'self', '.', 'pos', '[', ':', '2', ']', ')', 'else', ':', 'p_node', '=', 'self', '.', '_get_node_parent', '(', 'self', '.', 'age', '-', '1', ',', 'n', ')', 'angle', '=', 'node', '.', 'get_node_angle', '(', 'p_node', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'comp', ')', ':', 'tot_angle', '=', 'self', '.', '__get_total_angle', '(', 'angle', ',', 'i', ')', 'length', '=', 'self', '.', '__get_total_length', '(', 'self', '.', 'age', '+', '1', ',', 'i', ')', 'self', '.', 'nodes', '[', 'self', '.', 'age', '+', '1', ']', '.', 'append', '(', 'node', '.', 'make_new_node', '(', 'length', ',', 'tot_angle', ')', ')', 'self', '.', 'age', '+=', '1', 'if', 'times', '>', '1', ':', 'self', '.', 'grow', '(', 'times', '-', '1', ')']"
"Let the tree grow. Args: times (integer): Indicate how many times the tree will grow."
"['Let', 'the', 'tree', 'grow', '.']"
"train"
"https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L167-L189"
14
"saltstack/salt"
"salt/modules/osquery.py"
"kernel_integrity"
"def kernel_integrity(attrs=None, where=None): ''' Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity ''' if __grains__['os_family'] in ['RedHat', 'Debian']: return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}"
"python"
"def kernel_integrity(attrs=None, where=None): ''' Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity ''' if __grains__['os_family'] in ['RedHat', 'Debian']: return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}"
"['def', 'kernel_integrity', '(', 'attrs', '=', 'None', ',', 'where', '=', 'None', ')', ':', 'if', '__grains__', '[', "'os_family'", ']', 'in', '[', "'RedHat'", ',', "'Debian'", ']', ':', 'return', '_osquery_cmd', '(', 'table', '=', "'kernel_integrity'", ',', 'attrs', '=', 'attrs', ',', 'where', '=', 'where', ')', 'return', '{', "'result'", ':', 'False', ',', "'comment'", ':', "'Only available on Red Hat or Debian based systems.'", '}']"
"Return kernel_integrity information from osquery CLI Example: .. code-block:: bash salt '*' osquery.kernel_integrity"
"['Return', 'kernel_integrity', 'information', 'from', 'osquery']"
"train"
"https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/osquery.py#L149-L161"
15
"rackerlabs/lambda-uploader"
"lambda_uploader/package.py"
"Package.virtualenv"
"def virtualenv(self, virtualenv): ''' Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped. ''' # If a boolean is passed then set the internal _skip_virtualenv flag if isinstance(virtualenv, bool): self._skip_virtualenv = virtualenv else: self._virtualenv = virtualenv if not os.path.isdir(self._virtualenv): raise Exception("virtualenv %s not found" % self._virtualenv) LOG.info("Using existing virtualenv at %s" % self._virtualenv) # use supplied virtualenv path self._pkg_venv = self._virtualenv self._skip_virtualenv = True"
"python"
"def virtualenv(self, virtualenv): ''' Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped. ''' # If a boolean is passed then set the internal _skip_virtualenv flag if isinstance(virtualenv, bool): self._skip_virtualenv = virtualenv else: self._virtualenv = virtualenv if not os.path.isdir(self._virtualenv): raise Exception("virtualenv %s not found" % self._virtualenv) LOG.info("Using existing virtualenv at %s" % self._virtualenv) # use supplied virtualenv path self._pkg_venv = self._virtualenv self._skip_virtualenv = True"
"['def', 'virtualenv', '(', 'self', ',', 'virtualenv', ')', ':', '# If a boolean is passed then set the internal _skip_virtualenv flag', 'if', 'isinstance', '(', 'virtualenv', ',', 'bool', ')', ':', 'self', '.', '_skip_virtualenv', '=', 'virtualenv', 'else', ':', 'self', '.', '_virtualenv', '=', 'virtualenv', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'self', '.', '_virtualenv', ')', ':', 'raise', 'Exception', '(', '"virtualenv %s not found"', '%', 'self', '.', '_virtualenv', ')', 'LOG', '.', 'info', '(', '"Using existing virtualenv at %s"', '%', 'self', '.', '_virtualenv', ')', '# use supplied virtualenv path', 'self', '.', '_pkg_venv', '=', 'self', '.', '_virtualenv', 'self', '.', '_skip_virtualenv', '=', 'True']"
"Sets the virtual environment for the lambda package If this is not set then package_dependencies will create a new one. Takes a path to a virtualenv or a boolean if the virtualenv creation should be skipped."
"['Sets', 'the', 'virtual', 'environment', 'for', 'the', 'lambda', 'package']"
"train"
"https://github.com/rackerlabs/lambda-uploader/blob/a5036e60d45d1a4fdc07df071f5b6e3b113388d4/lambda_uploader/package.py#L114-L133"
16
"agoragames/kairos"
"kairos/cassandra_backend.py"
"CassandraSet._insert_stmt"
"def _insert_stmt(self, name, value, timestamp, interval, config): '''Helper to generate the insert statement.''' # Calculate the TTL and abort if inserting into the past expire, ttl = config['expire'], config['ttl'](timestamp) if expire and not ttl: return None i_time = config['i_calc'].to_bucket(timestamp) if not config['coarse']: r_time = config['r_calc'].to_bucket(timestamp) else: r_time = -1 # TODO: figure out escaping rules of CQL stmt = '''INSERT INTO %s (name, interval, i_time, r_time, value) VALUES ('%s', '%s', %s, %s, %s)'''%(self._table, name, interval, i_time, r_time, value) expire = config['expire'] if ttl: stmt += " USING TTL %s"%(ttl) return stmt"
"python"
"def _insert_stmt(self, name, value, timestamp, interval, config): '''Helper to generate the insert statement.''' # Calculate the TTL and abort if inserting into the past expire, ttl = config['expire'], config['ttl'](timestamp) if expire and not ttl: return None i_time = config['i_calc'].to_bucket(timestamp) if not config['coarse']: r_time = config['r_calc'].to_bucket(timestamp) else: r_time = -1 # TODO: figure out escaping rules of CQL stmt = '''INSERT INTO %s (name, interval, i_time, r_time, value) VALUES ('%s', '%s', %s, %s, %s)'''%(self._table, name, interval, i_time, r_time, value) expire = config['expire'] if ttl: stmt += " USING TTL %s"%(ttl) return stmt"
"['def', '_insert_stmt', '(', 'self', ',', 'name', ',', 'value', ',', 'timestamp', ',', 'interval', ',', 'config', ')', ':', '# Calculate the TTL and abort if inserting into the past', 'expire', ',', 'ttl', '=', 'config', '[', "'expire'", ']', ',', 'config', '[', "'ttl'", ']', '(', 'timestamp', ')', 'if', 'expire', 'and', 'not', 'ttl', ':', 'return', 'None', 'i_time', '=', 'config', '[', "'i_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', 'if', 'not', 'config', '[', "'coarse'", ']', ':', 'r_time', '=', 'config', '[', "'r_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', 'else', ':', 'r_time', '=', '-', '1', '# TODO: figure out escaping rules of CQL', 'stmt', '=', "'''INSERT INTO %s (name, interval, i_time, r_time, value)\n VALUES ('%s', '%s', %s, %s, %s)'''", '%', '(', 'self', '.', '_table', ',', 'name', ',', 'interval', ',', 'i_time', ',', 'r_time', ',', 'value', ')', 'expire', '=', 'config', '[', "'expire'", ']', 'if', 'ttl', ':', 'stmt', '+=', '" USING TTL %s"', '%', '(', 'ttl', ')', 'return', 'stmt']"
"Helper to generate the insert statement."
"['Helper', 'to', 'generate', 'the', 'insert', 'statement', '.']"
"train"
"https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/cassandra_backend.py#L646-L665"
17
"pavelsof/ipalint"
"ipalint/read.py"
"Reader._determine_dialect"
"def _determine_dialect(self, lines): """ Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method. """ permuts = [(quotechar, escapechar) for quotechar in CSV_QUOTECHARS for escapechar in CSV_ESCAPECHARS] for delim in CSV_DELIMITERS: counts = [line.count(delim) for line in lines] if min(counts) == 0: continue for quotechar, escapechar in permuts: doublequote = True if escapechar is None else False reader = csv.reader(lines, delimiter=delim, quotechar=quotechar, doublequote=doublequote, escapechar=escapechar) try: assert len(set([len(line) for line in reader])) == 1 except AssertionError: continue else: break else: continue # no suitable quoting found break # found it! else: return None return Dialect(delim, quotechar, doublequote, escapechar)"
"python"
"def _determine_dialect(self, lines): """ Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method. """ permuts = [(quotechar, escapechar) for quotechar in CSV_QUOTECHARS for escapechar in CSV_ESCAPECHARS] for delim in CSV_DELIMITERS: counts = [line.count(delim) for line in lines] if min(counts) == 0: continue for quotechar, escapechar in permuts: doublequote = True if escapechar is None else False reader = csv.reader(lines, delimiter=delim, quotechar=quotechar, doublequote=doublequote, escapechar=escapechar) try: assert len(set([len(line) for line in reader])) == 1 except AssertionError: continue else: break else: continue # no suitable quoting found break # found it! else: return None return Dialect(delim, quotechar, doublequote, escapechar)"
"['def', '_determine_dialect', '(', 'self', ',', 'lines', ')', ':', 'permuts', '=', '[', '(', 'quotechar', ',', 'escapechar', ')', 'for', 'quotechar', 'in', 'CSV_QUOTECHARS', 'for', 'escapechar', 'in', 'CSV_ESCAPECHARS', ']', 'for', 'delim', 'in', 'CSV_DELIMITERS', ':', 'counts', '=', '[', 'line', '.', 'count', '(', 'delim', ')', 'for', 'line', 'in', 'lines', ']', 'if', 'min', '(', 'counts', ')', '==', '0', ':', 'continue', 'for', 'quotechar', ',', 'escapechar', 'in', 'permuts', ':', 'doublequote', '=', 'True', 'if', 'escapechar', 'is', 'None', 'else', 'False', 'reader', '=', 'csv', '.', 'reader', '(', 'lines', ',', 'delimiter', '=', 'delim', ',', 'quotechar', '=', 'quotechar', ',', 'doublequote', '=', 'doublequote', ',', 'escapechar', '=', 'escapechar', ')', 'try', ':', 'assert', 'len', '(', 'set', '(', '[', 'len', '(', 'line', ')', 'for', 'line', 'in', 'reader', ']', ')', ')', '==', '1', 'except', 'AssertionError', ':', 'continue', 'else', ':', 'break', 'else', ':', 'continue', '# no suitable quoting found', 'break', '# found it!', 'else', ':', 'return', 'None', 'return', 'Dialect', '(', 'delim', ',', 'quotechar', ',', 'doublequote', ',', 'escapechar', ')']"
"Expects a non-empty [] of strings; these would normally be the first few lines of a csv file. Returns the most likely Dialect named tuple or None if the data seems to form a single column. Ensures that using the returned dialect, all the lines given will have the same number of columns. Helper for the get_dialect method."
"['Expects', 'a', 'non', '-', 'empty', '[]', 'of', 'strings', ';', 'these', 'would', 'normally', 'be', 'the', 'first', 'few', 'lines', 'of', 'a', 'csv', 'file', '.', 'Returns', 'the', 'most', 'likely', 'Dialect', 'named', 'tuple', 'or', 'None', 'if', 'the', 'data', 'seems', 'to', 'form', 'a', 'single', 'column', '.']"
"train"
"https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L177-L218"
18
"angr/angr"
"angr/utils/graph.py"
"compute_dominance_frontier"
"def compute_dominance_frontier(graph, domtree): """ Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier """ df = {} # Perform a post-order search on the dominator tree for x in networkx.dfs_postorder_nodes(domtree): if x not in graph: # Skip nodes that are not in the graph continue df[x] = set() # local set for y in graph.successors(x): if x not in domtree.predecessors(y): df[x].add(y) # up set if x is None: continue for z in domtree.successors(x): if z is x: continue if z not in df: continue for y in df[z]: if x not in list(domtree.predecessors(y)): df[x].add(y) return df"
"python"
"def compute_dominance_frontier(graph, domtree): """ Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier """ df = {} # Perform a post-order search on the dominator tree for x in networkx.dfs_postorder_nodes(domtree): if x not in graph: # Skip nodes that are not in the graph continue df[x] = set() # local set for y in graph.successors(x): if x not in domtree.predecessors(y): df[x].add(y) # up set if x is None: continue for z in domtree.successors(x): if z is x: continue if z not in df: continue for y in df[z]: if x not in list(domtree.predecessors(y)): df[x].add(y) return df"
"['def', 'compute_dominance_frontier', '(', 'graph', ',', 'domtree', ')', ':', 'df', '=', '{', '}', '# Perform a post-order search on the dominator tree', 'for', 'x', 'in', 'networkx', '.', 'dfs_postorder_nodes', '(', 'domtree', ')', ':', 'if', 'x', 'not', 'in', 'graph', ':', '# Skip nodes that are not in the graph', 'continue', 'df', '[', 'x', ']', '=', 'set', '(', ')', '# local set', 'for', 'y', 'in', 'graph', '.', 'successors', '(', 'x', ')', ':', 'if', 'x', 'not', 'in', 'domtree', '.', 'predecessors', '(', 'y', ')', ':', 'df', '[', 'x', ']', '.', 'add', '(', 'y', ')', '# up set', 'if', 'x', 'is', 'None', ':', 'continue', 'for', 'z', 'in', 'domtree', '.', 'successors', '(', 'x', ')', ':', 'if', 'z', 'is', 'x', ':', 'continue', 'if', 'z', 'not', 'in', 'df', ':', 'continue', 'for', 'y', 'in', 'df', '[', 'z', ']', ':', 'if', 'x', 'not', 'in', 'list', '(', 'domtree', '.', 'predecessors', '(', 'y', ')', ')', ':', 'df', '[', 'x', ']', '.', 'add', '(', 'y', ')', 'return', 'df']"
"Compute a dominance frontier based on the given post-dominator tree. This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. :param graph: The graph where we want to compute the dominance frontier. :param domtree: The dominator tree :returns: A dict of dominance frontier"
"['Compute', 'a', 'dominance', 'frontier', 'based', 'on', 'the', 'given', 'post', '-', 'dominator', 'tree', '.']"
"train"
"https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/utils/graph.py#L63-L104"
19
"CI-WATER/mapkit"
"mapkit/RasterConverter.py"
"RasterConverter.isNumber"
"def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False"
"python"
"def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False"
"['def', 'isNumber', '(', 'self', ',', 'value', ')', ':', 'try', ':', 'str', '(', 'value', ')', 'float', '(', 'value', ')', 'return', 'True', 'except', 'ValueError', ':', 'return', 'False']"
"Validate whether a value is a number or not"
"['Validate', 'whether', 'a', 'value', 'is', 'a', 'number', 'or', 'not']"
"train"
"https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterConverter.py#L1097-L1107"
20
"CamDavidsonPilon/lifelines"
"lifelines/utils/__init__.py"
"survival_table_from_events"
"def survival_table_from_events( death_times, event_observed, birth_times=None, columns=["removed", "observed", "censored", "entrance", "at_risk"], weights=None, collapse=False, intervals=None, ): # pylint: disable=dangerous-default-value,too-many-locals """ Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events """ removed, observed, censored, entrance, at_risk = columns death_times = np.asarray(death_times) if birth_times is None: birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0]) else: birth_times = np.asarray(birth_times) if np.any(birth_times > death_times): raise ValueError("birth time must be less than time of death.") if weights is None: weights = 1 # deal with deaths and censorships df = pd.DataFrame(death_times, columns=["event_at"]) df[removed] = np.asarray(weights) df[observed] = np.asarray(weights) * (np.asarray(event_observed).astype(bool)) death_table = df.groupby("event_at").sum() death_table[censored] = (death_table[removed] - death_table[observed]).astype(int) # deal with late births births = pd.DataFrame(birth_times, columns=["event_at"]) births[entrance] = np.asarray(weights) births_table = births.groupby("event_at").sum() event_table = death_table.join(births_table, how="outer", sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414 event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0) # group by intervals if (collapse) or (intervals is not None): event_table = _group_event_table_by_intervals(event_table, intervals) if (np.asarray(weights).astype(int) != weights).any(): return event_table.astype(float) return event_table.astype(int)"
"python"
"def survival_table_from_events( death_times, event_observed, birth_times=None, columns=["removed", "observed", "censored", "entrance", "at_risk"], weights=None, collapse=False, intervals=None, ): # pylint: disable=dangerous-default-value,too-many-locals """ Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events """ removed, observed, censored, entrance, at_risk = columns death_times = np.asarray(death_times) if birth_times is None: birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0]) else: birth_times = np.asarray(birth_times) if np.any(birth_times > death_times): raise ValueError("birth time must be less than time of death.") if weights is None: weights = 1 # deal with deaths and censorships df = pd.DataFrame(death_times, columns=["event_at"]) df[removed] = np.asarray(weights) df[observed] = np.asarray(weights) * (np.asarray(event_observed).astype(bool)) death_table = df.groupby("event_at").sum() death_table[censored] = (death_table[removed] - death_table[observed]).astype(int) # deal with late births births = pd.DataFrame(birth_times, columns=["event_at"]) births[entrance] = np.asarray(weights) births_table = births.groupby("event_at").sum() event_table = death_table.join(births_table, how="outer", sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414 event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0) # group by intervals if (collapse) or (intervals is not None): event_table = _group_event_table_by_intervals(event_table, intervals) if (np.asarray(weights).astype(int) != weights).any(): return event_table.astype(float) return event_table.astype(int)"
"['def', 'survival_table_from_events', '(', 'death_times', ',', 'event_observed', ',', 'birth_times', '=', 'None', ',', 'columns', '=', '[', '"removed"', ',', '"observed"', ',', '"censored"', ',', '"entrance"', ',', '"at_risk"', ']', ',', 'weights', '=', 'None', ',', 'collapse', '=', 'False', ',', 'intervals', '=', 'None', ',', ')', ':', '# pylint: disable=dangerous-default-value,too-many-locals', 'removed', ',', 'observed', ',', 'censored', ',', 'entrance', ',', 'at_risk', '=', 'columns', 'death_times', '=', 'np', '.', 'asarray', '(', 'death_times', ')', 'if', 'birth_times', 'is', 'None', ':', 'birth_times', '=', 'min', '(', '0', ',', 'death_times', '.', 'min', '(', ')', ')', '*', 'np', '.', 'ones', '(', 'death_times', '.', 'shape', '[', '0', ']', ')', 'else', ':', 'birth_times', '=', 'np', '.', 'asarray', '(', 'birth_times', ')', 'if', 'np', '.', 'any', '(', 'birth_times', '>', 'death_times', ')', ':', 'raise', 'ValueError', '(', '"birth time must be less than time of death."', ')', 'if', 'weights', 'is', 'None', ':', 'weights', '=', '1', '# deal with deaths and censorships', 'df', '=', 'pd', '.', 'DataFrame', '(', 'death_times', ',', 'columns', '=', '[', '"event_at"', ']', ')', 'df', '[', 'removed', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', 'df', '[', 'observed', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', '*', '(', 'np', '.', 'asarray', '(', 'event_observed', ')', '.', 'astype', '(', 'bool', ')', ')', 'death_table', '=', 'df', '.', 'groupby', '(', '"event_at"', ')', '.', 'sum', '(', ')', 'death_table', '[', 'censored', ']', '=', '(', 'death_table', '[', 'removed', ']', '-', 'death_table', '[', 'observed', ']', ')', '.', 'astype', '(', 'int', ')', '# deal with late births', 'births', '=', 'pd', '.', 'DataFrame', '(', 'birth_times', ',', 'columns', '=', '[', '"event_at"', ']', ')', 'births', '[', 'entrance', ']', '=', 'np', '.', 'asarray', '(', 'weights', ')', 'births_table', '=', 'births', '.', 'groupby', '(', '"event_at"', ')', '.', 'sum', '(', ')', 'event_table', '=', 'death_table', '.', 'join', '(', 'births_table', ',', 'how', '=', '"outer"', ',', 'sort', '=', 'True', ')', '.', 'fillna', '(', '0', ')', '# http://wesmckinney.com/blog/?p=414', 'event_table', '[', 'at_risk', ']', '=', 'event_table', '[', 'entrance', ']', '.', 'cumsum', '(', ')', '-', 'event_table', '[', 'removed', ']', '.', 'cumsum', '(', ')', '.', 'shift', '(', '1', ')', '.', 'fillna', '(', '0', ')', '# group by intervals', 'if', '(', 'collapse', ')', 'or', '(', 'intervals', 'is', 'not', 'None', ')', ':', 'event_table', '=', '_group_event_table_by_intervals', '(', 'event_table', ',', 'intervals', ')', 'if', '(', 'np', '.', 'asarray', '(', 'weights', ')', '.', 'astype', '(', 'int', ')', '!=', 'weights', ')', '.', 'any', '(', ')', ':', 'return', 'event_table', '.', 'astype', '(', 'float', ')', 'return', 'event_table', '.', 'astype', '(', 'int', ')']"
"Parameters ---------- death_times: (n,) array represent the event times event_observed: (n,) array 1 if observed event, 0 is censored event. birth_times: a (n,) array, optional representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: iterable, optional a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: (n,1) array, optional Optional argument to use weights for individuals. Assumes weights of 1 if not provided. collapse: boolean, optional (default=False) If True, collapses survival table into lifetable to show events in interval bins intervals: iterable, optional Default None, otherwise a list/(n,1) array of interval edge measures. If left as None while collapse=True, then Freedman-Diaconis rule for histogram bins will be used to determine intervals. Returns ------- DataFrame Pandas DataFrame with index as the unique times or intervals in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example ------- >>> #Uncollapsed output >>> removed observed censored entrance at_risk >>> event_at >>> 0 0 0 0 11 11 >>> 6 1 1 0 0 11 >>> 7 2 2 0 0 10 >>> 9 3 3 0 0 8 >>> 13 3 3 0 0 5 >>> 15 2 2 0 0 2 >>> #Collapsed output >>> removed observed censored at_risk >>> sum sum sum max >>> event_at >>> (0, 2] 34 33 1 312 >>> (2, 4] 84 42 42 278 >>> (4, 6] 64 17 47 194 >>> (6, 8] 63 16 47 130 >>> (8, 10] 35 12 23 67 >>> (10, 12] 24 5 19 32 See Also -------- group_survival_table_from_events"
"['Parameters', '----------', 'death_times', ':', '(', 'n', ')', 'array', 'represent', 'the', 'event', 'times', 'event_observed', ':', '(', 'n', ')', 'array', '1', 'if', 'observed', 'event', '0', 'is', 'censored', 'event', '.', 'birth_times', ':', 'a', '(', 'n', ')', 'array', 'optional', 'representing', 'when', 'the', 'subject', 'was', 'first', 'observed', '.', 'A', 'subject', 's', 'death', 'event', 'is', 'then', 'at', '[', 'birth', 'times', '+', 'duration', 'observed', ']', '.', 'If', 'None', '(', 'default', ')', 'birth_times', 'are', 'set', 'to', 'be', 'the', 'first', 'observation', 'or', '0', 'which', 'ever', 'is', 'smaller', '.', 'columns', ':', 'iterable', 'optional', 'a', '3', '-', 'length', 'array', 'to', 'call', 'the', 'in', 'order', 'removed', 'individuals', 'observed', 'deaths', 'and', 'censorships', '.', 'weights', ':', '(', 'n', '1', ')', 'array', 'optional', 'Optional', 'argument', 'to', 'use', 'weights', 'for', 'individuals', '.', 'Assumes', 'weights', 'of', '1', 'if', 'not', 'provided', '.', 'collapse', ':', 'boolean', 'optional', '(', 'default', '=', 'False', ')', 'If', 'True', 'collapses', 'survival', 'table', 'into', 'lifetable', 'to', 'show', 'events', 'in', 'interval', 'bins', 'intervals', ':', 'iterable', 'optional', 'Default', 'None', 'otherwise', 'a', 'list', '/', '(', 'n', '1', ')', 'array', 'of', 'interval', 'edge', 'measures', '.', 'If', 'left', 'as', 'None', 'while', 'collapse', '=', 'True', 'then', 'Freedman', '-', 'Diaconis', 'rule', 'for', 'histogram', 'bins', 'will', 'be', 'used', 'to', 'determine', 'intervals', '.']"
"train"
"https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/__init__.py#L262-L361"
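The lifelines row above fully specifies the behavior of survival_table_from_events, so a short usage sketch may help; it assumes lifelines is installed and that the function is importable from lifelines.utils (as the file path in the row suggests), and the small duration/censoring arrays are made up purely for illustration.

import numpy as np
from lifelines.utils import survival_table_from_events  # import path assumed from the row's file path

# Hypothetical event data: durations and censoring flags (1 = observed event, 0 = censored)
T = np.array([6, 7, 7, 9, 9, 9, 13, 13, 13, 15, 15])
E = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])

table = survival_table_from_events(T, E)
print(table)  # uncollapsed table indexed by event_at, as in the docstring example

collapsed = survival_table_from_events(T, E, collapse=True)
print(collapsed)  # interval-binned lifetable; Freedman-Diaconis bins are used when intervals=None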
21
"django-blog-zinnia/cmsplugin-zinnia"
"cmsplugin_zinnia/cms_plugins.py"
"CMSRandomEntriesPlugin.render"
"def render(self, context, instance, placeholder): """ Update the context with plugin's data """ context = super(CMSRandomEntriesPlugin, self).render( context, instance, placeholder) context['template_to_render'] = (str(instance.template_to_render) or 'zinnia/tags/entries_random.html') return context"
"python"
"def render(self, context, instance, placeholder): """ Update the context with plugin's data """ context = super(CMSRandomEntriesPlugin, self).render( context, instance, placeholder) context['template_to_render'] = (str(instance.template_to_render) or 'zinnia/tags/entries_random.html') return context"
"['def', 'render', '(', 'self', ',', 'context', ',', 'instance', ',', 'placeholder', ')', ':', 'context', '=', 'super', '(', 'CMSRandomEntriesPlugin', ',', 'self', ')', '.', 'render', '(', 'context', ',', 'instance', ',', 'placeholder', ')', 'context', '[', "'template_to_render'", ']', '=', '(', 'str', '(', 'instance', '.', 'template_to_render', ')', 'or', "'zinnia/tags/entries_random.html'", ')', 'return', 'context']"
"Update the context with plugin's data"
"['Update', 'the', 'context', 'with', 'plugin', 's', 'data']"
"train"
"https://github.com/django-blog-zinnia/cmsplugin-zinnia/blob/7613c0d9ae29affe9ab97527e4b6d5bef124afdc/cmsplugin_zinnia/cms_plugins.py#L131-L139"
22
"rjw57/throw"
"throw/minus/minus.py"
"Gallery.SaveGallery"
"def SaveGallery(self, name=None, items=None): """Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change.""" url = 'http://min.us/api/SaveGallery' if not name: if not self.name: name = self.GetItems()[0] if self.name: name = self.name if not items: if not self.items: items = self.GetItems()[1] elif self.items: items = self.items params = {"name": name, "id":self.editor_id, "items":items} try: response = _dopost(url, params) except: pass else: self.name = name self.items = items"
"python"
"def SaveGallery(self, name=None, items=None): """Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change.""" url = 'http://min.us/api/SaveGallery' if not name: if not self.name: name = self.GetItems()[0] if self.name: name = self.name if not items: if not self.items: items = self.GetItems()[1] elif self.items: items = self.items params = {"name": name, "id":self.editor_id, "items":items} try: response = _dopost(url, params) except: pass else: self.name = name self.items = items"
"['def', 'SaveGallery', '(', 'self', ',', 'name', '=', 'None', ',', 'items', '=', 'None', ')', ':', 'url', '=', "'http://min.us/api/SaveGallery'", 'if', 'not', 'name', ':', 'if', 'not', 'self', '.', 'name', ':', 'name', '=', 'self', '.', 'GetItems', '(', ')', '[', '0', ']', 'if', 'self', '.', 'name', ':', 'name', '=', 'self', '.', 'name', 'if', 'not', 'items', ':', 'if', 'not', 'self', '.', 'items', ':', 'items', '=', 'self', '.', 'GetItems', '(', ')', '[', '1', ']', 'elif', 'self', '.', 'items', ':', 'items', '=', 'self', '.', 'items', 'params', '=', '{', '"name"', ':', 'name', ',', '"id"', ':', 'self', '.', 'editor_id', ',', '"items"', ':', 'items', '}', 'try', ':', 'response', '=', '_dopost', '(', 'url', ',', 'params', ')', 'except', ':', 'pass', 'else', ':', 'self', '.', 'name', '=', 'name', 'self', '.', 'items', '=', 'items']"
"Use this to update the gallery name or change sort order. Specify which attribute (name or items or both) you want to change."
"['Use', 'this', 'to', 'update', 'the', 'gallery', 'name', 'or', 'change', 'sort', 'order', '.', 'Specify', 'which', 'attribute', '(', 'name', 'or', 'items', 'or', 'both', ')', 'you', 'want', 'to', 'change', '.']"
"train"
"https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/minus/minus.py#L62-L88"
23
"letuananh/chirptext"
"chirptext/cli.py"
"setup_logging"
"def setup_logging(filename, log_dir=None, force_setup=False): ''' Try to load logging configuration from a file. Set level to INFO if failed. ''' if not force_setup and ChirpCLI.SETUP_COMPLETED: logging.debug("Master logging has been setup. This call will be ignored.") return if log_dir and not os.path.exists(log_dir): os.makedirs(log_dir) if os.path.isfile(filename): with open(filename) as config_file: try: config = json.load(config_file) logging.config.dictConfig(config) logging.info("logging was setup using {}".format(filename)) ChirpCLI.SETUP_COMPLETED = True except Exception as e: logging.exception("Could not load logging config") # default logging config logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.INFO)"
"python"
"def setup_logging(filename, log_dir=None, force_setup=False): ''' Try to load logging configuration from a file. Set level to INFO if failed. ''' if not force_setup and ChirpCLI.SETUP_COMPLETED: logging.debug("Master logging has been setup. This call will be ignored.") return if log_dir and not os.path.exists(log_dir): os.makedirs(log_dir) if os.path.isfile(filename): with open(filename) as config_file: try: config = json.load(config_file) logging.config.dictConfig(config) logging.info("logging was setup using {}".format(filename)) ChirpCLI.SETUP_COMPLETED = True except Exception as e: logging.exception("Could not load logging config") # default logging config logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.INFO)"
"['def', 'setup_logging', '(', 'filename', ',', 'log_dir', '=', 'None', ',', 'force_setup', '=', 'False', ')', ':', 'if', 'not', 'force_setup', 'and', 'ChirpCLI', '.', 'SETUP_COMPLETED', ':', 'logging', '.', 'debug', '(', '"Master logging has been setup. This call will be ignored."', ')', 'return', 'if', 'log_dir', 'and', 'not', 'os', '.', 'path', '.', 'exists', '(', 'log_dir', ')', ':', 'os', '.', 'makedirs', '(', 'log_dir', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'with', 'open', '(', 'filename', ')', 'as', 'config_file', ':', 'try', ':', 'config', '=', 'json', '.', 'load', '(', 'config_file', ')', 'logging', '.', 'config', '.', 'dictConfig', '(', 'config', ')', 'logging', '.', 'info', '(', '"logging was setup using {}"', '.', 'format', '(', 'filename', ')', ')', 'ChirpCLI', '.', 'SETUP_COMPLETED', '=', 'True', 'except', 'Exception', 'as', 'e', ':', 'logging', '.', 'exception', '(', '"Could not load logging config"', ')', '# default logging config', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')', 'else', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')']"
"Try to load logging configuration from a file. Set level to INFO if failed."
"['Try', 'to', 'load', 'logging', 'configuration', 'from', 'a', 'file', '.', 'Set', 'level', 'to', 'INFO', 'if', 'failed', '.']"
"train"
"https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/cli.py#L35-L55"
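Since the chirptext row above shows the full behavior of setup_logging, here is a minimal, hedged calling sketch; the import path is assumed from the row's file path and the config filename is hypothetical.

from chirptext.cli import setup_logging  # import path assumed from the row's file path

# 'logging.json' is a hypothetical dictConfig file; if it does not exist,
# the function falls back to logging.basicConfig(level=logging.INFO).
# log_dir='logs' is created if missing.
setup_logging('logging.json', log_dir='logs')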
24
"doconix/django-mako-plus"
"django_mako_plus/template/adapter.py"
"MakoTemplateAdapter.name"
"def name(self): '''Returns the name of this template (if created from a file) or "string" if not''' if self.mako_template.filename: return os.path.basename(self.mako_template.filename) return 'string'"
"python"
"def name(self): '''Returns the name of this template (if created from a file) or "string" if not''' if self.mako_template.filename: return os.path.basename(self.mako_template.filename) return 'string'"
"['def', 'name', '(', 'self', ')', ':', 'if', 'self', '.', 'mako_template', '.', 'filename', ':', 'return', 'os', '.', 'path', '.', 'basename', '(', 'self', '.', 'mako_template', '.', 'filename', ')', 'return', "'string'"]"
"Returns the name of this template (if created from a file) or "string" if not"
"['Returns', 'the', 'name', 'of', 'this', 'template', '(', 'if', 'created', 'from', 'a', 'file', ')', 'or', 'string', 'if', 'not']"
"train"
"https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/template/adapter.py#L39-L43"
25
"HazyResearch/fonduer"
"src/fonduer/learning/disc_models/sparse_lstm.py"
"SparseLSTM.forward"
"def forward(self, X): """Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes) """ s = X[:-2] f = X[-2] w = X[-1] batch_size = len(f) # Generate lstm weight indices x_idx = self._cuda( torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat( batch_size, 1 ) ) outputs = self._cuda(torch.Tensor([])) # Calculate textual features from LSTMs for i in range(len(s)): state_word = self.lstms[0].init_hidden(batch_size) output = self.lstms[0].forward(s[i][0], s[i][1], state_word) outputs = torch.cat((outputs, output), 1) # Concatenate textual features with multi-modal features feaures = torch.cat((x_idx, f), 1) weights = torch.cat((outputs, w), 1) return self.sparse_linear(feaures, weights)"
"python"
"def forward(self, X): """Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes) """ s = X[:-2] f = X[-2] w = X[-1] batch_size = len(f) # Generate lstm weight indices x_idx = self._cuda( torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat( batch_size, 1 ) ) outputs = self._cuda(torch.Tensor([])) # Calculate textual features from LSTMs for i in range(len(s)): state_word = self.lstms[0].init_hidden(batch_size) output = self.lstms[0].forward(s[i][0], s[i][1], state_word) outputs = torch.cat((outputs, output), 1) # Concatenate textual features with multi-modal features feaures = torch.cat((x_idx, f), 1) weights = torch.cat((outputs, w), 1) return self.sparse_linear(feaures, weights)"
"['def', 'forward', '(', 'self', ',', 'X', ')', ':', 's', '=', 'X', '[', ':', '-', '2', ']', 'f', '=', 'X', '[', '-', '2', ']', 'w', '=', 'X', '[', '-', '1', ']', 'batch_size', '=', 'len', '(', 'f', ')', '# Generate lstm weight indices', 'x_idx', '=', 'self', '.', '_cuda', '(', 'torch', '.', 'as_tensor', '(', 'np', '.', 'arange', '(', '1', ',', 'self', '.', 'settings', '[', '"lstm_dim"', ']', '+', '1', ')', ')', '.', 'repeat', '(', 'batch_size', ',', '1', ')', ')', 'outputs', '=', 'self', '.', '_cuda', '(', 'torch', '.', 'Tensor', '(', '[', ']', ')', ')', '# Calculate textual features from LSTMs', 'for', 'i', 'in', 'range', '(', 'len', '(', 's', ')', ')', ':', 'state_word', '=', 'self', '.', 'lstms', '[', '0', ']', '.', 'init_hidden', '(', 'batch_size', ')', 'output', '=', 'self', '.', 'lstms', '[', '0', ']', '.', 'forward', '(', 's', '[', 'i', ']', '[', '0', ']', ',', 's', '[', 'i', ']', '[', '1', ']', ',', 'state_word', ')', 'outputs', '=', 'torch', '.', 'cat', '(', '(', 'outputs', ',', 'output', ')', ',', '1', ')', '# Concatenate textual features with multi-modal features', 'feaures', '=', 'torch', '.', 'cat', '(', '(', 'x_idx', ',', 'f', ')', ',', '1', ')', 'weights', '=', 'torch', '.', 'cat', '(', '(', 'outputs', ',', 'w', ')', ',', '1', ')', 'return', 'self', '.', 'sparse_linear', '(', 'feaures', ',', 'weights', ')']"
"Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes)"
"['Forward', 'function', '.']"
"train"
"https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/sparse_lstm.py#L25-L64"
26
"google/grr"
"grr/server/grr_response_server/databases/mysql_flows.py"
"MySQLDBFlowMixin.CountFlowResultsByType"
"def CountFlowResultsByType(self, client_id, flow_id, cursor=None): """Returns counts of flow results grouped by result type.""" query = ("SELECT type, COUNT(*) FROM flow_results " "FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) " "WHERE client_id = %s AND flow_id = %s " "GROUP BY type") args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)] cursor.execute(query, args) return dict(cursor.fetchall())"
"python"
"def CountFlowResultsByType(self, client_id, flow_id, cursor=None): """Returns counts of flow results grouped by result type.""" query = ("SELECT type, COUNT(*) FROM flow_results " "FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) " "WHERE client_id = %s AND flow_id = %s " "GROUP BY type") args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)] cursor.execute(query, args) return dict(cursor.fetchall())"
"['def', 'CountFlowResultsByType', '(', 'self', ',', 'client_id', ',', 'flow_id', ',', 'cursor', '=', 'None', ')', ':', 'query', '=', '(', '"SELECT type, COUNT(*) FROM flow_results "', '"FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) "', '"WHERE client_id = %s AND flow_id = %s "', '"GROUP BY type"', ')', 'args', '=', '[', 'db_utils', '.', 'ClientIDToInt', '(', 'client_id', ')', ',', 'db_utils', '.', 'FlowIDToInt', '(', 'flow_id', ')', ']', 'cursor', '.', 'execute', '(', 'query', ',', 'args', ')', 'return', 'dict', '(', 'cursor', '.', 'fetchall', '(', ')', ')']"
"Returns counts of flow results grouped by result type."
"['Returns', 'counts', 'of', 'flow', 'results', 'grouped', 'by', 'result', 'type', '.']"
"train"
"https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L1350-L1360"
27
"google/grr"
"grr/client/grr_response_client/client_actions/artifact_collector.py"
"ArtifactCollector._ProcessGrepSource"
"def _ProcessGrepSource(self, source): """Find files fulfilling regex conditions.""" attributes = source.base_source.attributes paths = artifact_utils.InterpolateListKbAttributes( attributes["paths"], self.knowledge_base, self.ignore_interpolation_errors) regex = utils.RegexListDisjunction(attributes["content_regex_list"]) condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch( regex=regex, mode="ALL_HITS") file_finder_action = rdf_file_finder.FileFinderAction.Stat() request = rdf_file_finder.FileFinderArgs( paths=paths, action=file_finder_action, conditions=[condition], follow_links=True) action = file_finder.FileFinderOSFromClient yield action, request"
"python"
"def _ProcessGrepSource(self, source): """Find files fulfilling regex conditions.""" attributes = source.base_source.attributes paths = artifact_utils.InterpolateListKbAttributes( attributes["paths"], self.knowledge_base, self.ignore_interpolation_errors) regex = utils.RegexListDisjunction(attributes["content_regex_list"]) condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch( regex=regex, mode="ALL_HITS") file_finder_action = rdf_file_finder.FileFinderAction.Stat() request = rdf_file_finder.FileFinderArgs( paths=paths, action=file_finder_action, conditions=[condition], follow_links=True) action = file_finder.FileFinderOSFromClient yield action, request"
"['def', '_ProcessGrepSource', '(', 'self', ',', 'source', ')', ':', 'attributes', '=', 'source', '.', 'base_source', '.', 'attributes', 'paths', '=', 'artifact_utils', '.', 'InterpolateListKbAttributes', '(', 'attributes', '[', '"paths"', ']', ',', 'self', '.', 'knowledge_base', ',', 'self', '.', 'ignore_interpolation_errors', ')', 'regex', '=', 'utils', '.', 'RegexListDisjunction', '(', 'attributes', '[', '"content_regex_list"', ']', ')', 'condition', '=', 'rdf_file_finder', '.', 'FileFinderCondition', '.', 'ContentsRegexMatch', '(', 'regex', '=', 'regex', ',', 'mode', '=', '"ALL_HITS"', ')', 'file_finder_action', '=', 'rdf_file_finder', '.', 'FileFinderAction', '.', 'Stat', '(', ')', 'request', '=', 'rdf_file_finder', '.', 'FileFinderArgs', '(', 'paths', '=', 'paths', ',', 'action', '=', 'file_finder_action', ',', 'conditions', '=', '[', 'condition', ']', ',', 'follow_links', '=', 'True', ')', 'action', '=', 'file_finder', '.', 'FileFinderOSFromClient', 'yield', 'action', ',', 'request']"
"Find files fulfilling regex conditions."
"['Find', 'files', 'fulfilling', 'regex', 'conditions', '.']"
"train"
"https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/artifact_collector.py#L208-L225"
28
"F5Networks/f5-common-python"
"f5/multi_device/cluster/__init__.py"
"ClusterManager.manage_extant"
"def manage_extant(self, **kwargs): '''Manage an existing cluster :param kwargs: dict -- keyword args in dict ''' self._check_device_number(kwargs['devices']) self.trust_domain = TrustDomain( devices=kwargs['devices'], partition=kwargs['device_group_partition'] ) self.device_group = DeviceGroup(**kwargs) self.cluster = Cluster(**kwargs)"
"python"
"def manage_extant(self, **kwargs): '''Manage an existing cluster :param kwargs: dict -- keyword args in dict ''' self._check_device_number(kwargs['devices']) self.trust_domain = TrustDomain( devices=kwargs['devices'], partition=kwargs['device_group_partition'] ) self.device_group = DeviceGroup(**kwargs) self.cluster = Cluster(**kwargs)"
"['def', 'manage_extant', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', '_check_device_number', '(', 'kwargs', '[', "'devices'", ']', ')', 'self', '.', 'trust_domain', '=', 'TrustDomain', '(', 'devices', '=', 'kwargs', '[', "'devices'", ']', ',', 'partition', '=', 'kwargs', '[', "'device_group_partition'", ']', ')', 'self', '.', 'device_group', '=', 'DeviceGroup', '(', '*', '*', 'kwargs', ')', 'self', '.', 'cluster', '=', 'Cluster', '(', '*', '*', 'kwargs', ')']"
"Manage an existing cluster :param kwargs: dict -- keyword args in dict"
"['Manage', 'an', 'existing', 'cluster']"
"train"
"https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/multi_device/cluster/__init__.py#L136-L148"
29
"google/python-gflags"
"gflags/flagvalues.py"
"FlagValues._GetFlagsDefinedByModule"
"def _GetFlagsDefinedByModule(self, module): """Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ return list(self.FlagsByModuleDict().get(module, []))"
"python"
"def _GetFlagsDefinedByModule(self, module): """Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ return list(self.FlagsByModuleDict().get(module, []))"
"['def', '_GetFlagsDefinedByModule', '(', 'self', ',', 'module', ')', ':', 'if', 'not', 'isinstance', '(', 'module', ',', 'str', ')', ':', 'module', '=', 'module', '.', '__name__', 'return', 'list', '(', 'self', '.', 'FlagsByModuleDict', '(', ')', '.', 'get', '(', 'module', ',', '[', ']', ')', ')']"
"Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object."
"['Returns', 'the', 'list', 'of', 'flags', 'defined', 'by', 'a', 'module', '.']"
"train"
"https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L265-L279"
30
"openp2pdesign/makerlabs"
"makerlabs/hackaday_io.py"
"get_labs"
"def get_labs(format): """Gets Hackerspaces data from hackaday.io.""" hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url) hackerspaces = {} # Load all the Hackerspaces for i in hackerspaces_json: current_lab = Hackerspace() current_lab.id = i["id"] current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id current_lab.name = i["name"] if len(i["description"]) != 0: current_lab.description = i["description"] elif len(i["summary"]) != 0: current_lab.description = i["summary"] current_lab.created_at = i["moments"]["exact"] # Check if there are coordinates if i["latlon"] is not None: latlon = json.loads(i["latlon"]) current_lab.latitude = latlon["lat"] current_lab.longitude = latlon["lng"] # Get country, county and city from them country = geolocator.reverse( [latlon["lat"], latlon["lng"]]) current_lab.country = country.raw[ "address"]["country"] current_lab.address = country.raw["display_name"] current_lab.address_1 = country.raw["display_name"] current_lab.country_code = country.raw[ "address"]["country_code"] current_lab.county = country.raw[ "address"]["state_district"] current_lab.city = country.raw[ "address"]["city"] current_lab.postal_code = country.raw[ "address"]["postcode"] else: # For labs without a location or coordinates # add 0,0 as coordinates current_lab.latitude = 0.0 current_lab.longitude = 0.0 # Add the lab hackerspaces[i["name"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in hackerspaces: single = hackerspaces[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = hackerspaces # Default: return an oject else: output = hackerspaces # Return a proper json if format.lower() == "json": output = json.dumps(output) return output"
"python"
"def get_labs(format): """Gets Hackerspaces data from hackaday.io.""" hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url) hackerspaces = {} # Load all the Hackerspaces for i in hackerspaces_json: current_lab = Hackerspace() current_lab.id = i["id"] current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id current_lab.name = i["name"] if len(i["description"]) != 0: current_lab.description = i["description"] elif len(i["summary"]) != 0: current_lab.description = i["summary"] current_lab.created_at = i["moments"]["exact"] # Check if there are coordinates if i["latlon"] is not None: latlon = json.loads(i["latlon"]) current_lab.latitude = latlon["lat"] current_lab.longitude = latlon["lng"] # Get country, county and city from them country = geolocator.reverse( [latlon["lat"], latlon["lng"]]) current_lab.country = country.raw[ "address"]["country"] current_lab.address = country.raw["display_name"] current_lab.address_1 = country.raw["display_name"] current_lab.country_code = country.raw[ "address"]["country_code"] current_lab.county = country.raw[ "address"]["state_district"] current_lab.city = country.raw[ "address"]["city"] current_lab.postal_code = country.raw[ "address"]["postcode"] else: # For labs without a location or coordinates # add 0,0 as coordinates current_lab.latitude = 0.0 current_lab.longitude = 0.0 # Add the lab hackerspaces[i["name"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in hackerspaces: single = hackerspaces[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in hackerspaces: output[j] = hackerspaces[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = hackerspaces # Default: return an oject else: output = hackerspaces # Return a proper json if format.lower() == "json": output = json.dumps(output) return output"
"['def', 'get_labs', '(', 'format', ')', ':', 'hackerspaces_json', '=', 'data_from_hackaday_io', '(', 'hackaday_io_labs_map_url', ')', 'hackerspaces', '=', '{', '}', '# Load all the Hackerspaces', 'for', 'i', 'in', 'hackerspaces_json', ':', 'current_lab', '=', 'Hackerspace', '(', ')', 'current_lab', '.', 'id', '=', 'i', '[', '"id"', ']', 'current_lab', '.', 'url', '=', '"https://hackaday.io/hackerspace/"', '+', 'current_lab', '.', 'id', 'current_lab', '.', 'name', '=', 'i', '[', '"name"', ']', 'if', 'len', '(', 'i', '[', '"description"', ']', ')', '!=', '0', ':', 'current_lab', '.', 'description', '=', 'i', '[', '"description"', ']', 'elif', 'len', '(', 'i', '[', '"summary"', ']', ')', '!=', '0', ':', 'current_lab', '.', 'description', '=', 'i', '[', '"summary"', ']', 'current_lab', '.', 'created_at', '=', 'i', '[', '"moments"', ']', '[', '"exact"', ']', '# Check if there are coordinates', 'if', 'i', '[', '"latlon"', ']', 'is', 'not', 'None', ':', 'latlon', '=', 'json', '.', 'loads', '(', 'i', '[', '"latlon"', ']', ')', 'current_lab', '.', 'latitude', '=', 'latlon', '[', '"lat"', ']', 'current_lab', '.', 'longitude', '=', 'latlon', '[', '"lng"', ']', '# Get country, county and city from them', 'country', '=', 'geolocator', '.', 'reverse', '(', '[', 'latlon', '[', '"lat"', ']', ',', 'latlon', '[', '"lng"', ']', ']', ')', 'current_lab', '.', 'country', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"country"', ']', 'current_lab', '.', 'address', '=', 'country', '.', 'raw', '[', '"display_name"', ']', 'current_lab', '.', 'address_1', '=', 'country', '.', 'raw', '[', '"display_name"', ']', 'current_lab', '.', 'country_code', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"country_code"', ']', 'current_lab', '.', 'county', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"state_district"', ']', 'current_lab', '.', 'city', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"city"', ']', 'current_lab', '.', 'postal_code', '=', 'country', '.', 'raw', '[', '"address"', ']', '[', '"postcode"', ']', 'else', ':', '# For labs without a location or coordinates', '# add 0,0 as coordinates', 'current_lab', '.', 'latitude', '=', '0.0', 'current_lab', '.', 'longitude', '=', '0.0', '# Add the lab', 'hackerspaces', '[', 'i', '[', '"name"', ']', ']', '=', 'current_lab', '# Return a dictiornary / json', 'if', 'format', '.', 'lower', '(', ')', '==', '"dict"', 'or', 'format', '.', 'lower', '(', ')', '==', '"json"', ':', 'output', '=', '{', '}', 'for', 'j', 'in', 'hackerspaces', ':', 'output', '[', 'j', ']', '=', 'hackerspaces', '[', 'j', ']', '.', '__dict__', '# Return a geojson', 'elif', 'format', '.', 'lower', '(', ')', '==', '"geojson"', 'or', 'format', '.', 'lower', '(', ')', '==', '"geo"', ':', 'labs_list', '=', '[', ']', 'for', 'l', 'in', 'hackerspaces', ':', 'single', '=', 'hackerspaces', '[', 'l', ']', '.', '__dict__', 'single_lab', '=', 'Feature', '(', 'type', '=', '"Feature"', ',', 'geometry', '=', 'Point', '(', '(', 'single', '[', '"latitude"', ']', ',', 'single', '[', '"longitude"', ']', ')', ')', ',', 'properties', '=', 'single', ')', 'labs_list', '.', 'append', '(', 'single_lab', ')', 'output', '=', 'dumps', '(', 'FeatureCollection', '(', 'labs_list', ')', ')', '# Return a Pandas DataFrame', 'elif', 'format', '.', 'lower', '(', ')', '==', '"pandas"', 'or', 'format', '.', 'lower', '(', ')', '==', '"dataframe"', ':', 'output', '=', '{', '}', 'for', 'j', 'in', 'hackerspaces', ':', 'output', '[', 'j', ']', '=', 'hackerspaces', '[', 'j', ']', '.', '__dict__', '# 
Transform the dict into a Pandas DataFrame', 'output', '=', 'pd', '.', 'DataFrame', '.', 'from_dict', '(', 'output', ')', 'output', '=', 'output', '.', 'transpose', '(', ')', '# Return an object', 'elif', 'format', '.', 'lower', '(', ')', '==', '"object"', 'or', 'format', '.', 'lower', '(', ')', '==', '"obj"', ':', 'output', '=', 'hackerspaces', '# Default: return an oject', 'else', ':', 'output', '=', 'hackerspaces', '# Return a proper json', 'if', 'format', '.', 'lower', '(', ')', '==', '"json"', ':', 'output', '=', 'json', '.', 'dumps', '(', 'output', ')', 'return', 'output']"
"Gets Hackerspaces data from hackaday.io."
"['Gets', 'Hackerspaces', 'data', 'from', 'hackaday', '.', 'io', '.']"
"train"
"https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/hackaday_io.py#L57-L137"
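The get_labs code in the row above handles several output formats, so a brief, hedged calling sketch follows; it assumes the package is installed and importable as makerlabs.hackaday_io (per the row's file path) and that network access to hackaday.io and the geocoding service is available.

from makerlabs.hackaday_io import get_labs  # import path assumed from the row's file path

# Each call fetches live data from hackaday.io (and a geocoder for labs with coordinates), so it can be slow
labs_df = get_labs(format="dataframe")     # same branch as format="pandas": one row per hackerspace
labs_geojson = get_labs(format="geojson")  # serialized GeoJSON FeatureCollection string
print(labs_df.head())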
31
"Azure/blobxfer"
"cli/cli.py"
"upload"
"def upload(ctx): """Upload files to Azure Storage""" settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload) ctx.initialize(settings.TransferAction.Upload) specs = settings.create_upload_specifications( ctx.cli_options, ctx.config) del ctx.cli_options for spec in specs: blobxfer.api.Uploader( ctx.general_options, ctx.credentials, spec ).start()"
"python"
"def upload(ctx): """Upload files to Azure Storage""" settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload) ctx.initialize(settings.TransferAction.Upload) specs = settings.create_upload_specifications( ctx.cli_options, ctx.config) del ctx.cli_options for spec in specs: blobxfer.api.Uploader( ctx.general_options, ctx.credentials, spec ).start()"
"['def', 'upload', '(', 'ctx', ')', ':', 'settings', '.', 'add_cli_options', '(', 'ctx', '.', 'cli_options', ',', 'settings', '.', 'TransferAction', '.', 'Upload', ')', 'ctx', '.', 'initialize', '(', 'settings', '.', 'TransferAction', '.', 'Upload', ')', 'specs', '=', 'settings', '.', 'create_upload_specifications', '(', 'ctx', '.', 'cli_options', ',', 'ctx', '.', 'config', ')', 'del', 'ctx', '.', 'cli_options', 'for', 'spec', 'in', 'specs', ':', 'blobxfer', '.', 'api', '.', 'Uploader', '(', 'ctx', '.', 'general_options', ',', 'ctx', '.', 'credentials', ',', 'spec', ')', '.', 'start', '(', ')']"
"Upload files to Azure Storage"
"['Upload', 'files', 'to', 'Azure', 'Storage']"
"train"
"https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/cli/cli.py#L1106-L1116"
32
"mitsei/dlkit"
"dlkit/json_/resource/sessions.py"
"ResourceBinAssignmentSession.get_assignable_bin_ids"
"def get_assignable_bin_ids(self, bin_id): """Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy) bins = lookup_session.get_bins() id_list = [] for bin in bins: id_list.append(bin.get_id()) return IdList(id_list)"
"python"
"def get_assignable_bin_ids(self, bin_id): """Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy) bins = lookup_session.get_bins() id_list = [] for bin in bins: id_list.append(bin.get_id()) return IdList(id_list)"
"['def', 'get_assignable_bin_ids', '(', 'self', ',', 'bin_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids', '# This will likely be overridden by an authorization adapter', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'RESOURCE'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_bin_lookup_session', '(', 'proxy', '=', 'self', '.', '_proxy', ')', 'bins', '=', 'lookup_session', '.', 'get_bins', '(', ')', 'id_list', '=', '[', ']', 'for', 'bin', 'in', 'bins', ':', 'id_list', '.', 'append', '(', 'bin', '.', 'get_id', '(', ')', ')', 'return', 'IdList', '(', 'id_list', ')']"
"Gets a list of bins including and under the given bin node in which any resource can be assigned. arg: bin_id (osid.id.Id): the ``Id`` of the ``Bin`` return: (osid.id.IdList) - list of assignable bin ``Ids`` raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*"
"['Gets', 'a', 'list', 'of', 'bins', 'including', 'and', 'under', 'the', 'given', 'bin', 'node', 'in', 'which', 'any', 'resource', 'can', 'be', 'assigned', '.']"
"train"
"https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L1562-L1581"