def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
    """
    Get the cached commit times for the combination of this parent_dir and relpaths

    Return the commit assigned to this combination and the actual times!
    """
    result = get_all_cached_commit_times(root_folder)
    for item in result:
        if (sorted(item.get("sorted_relpaths", [])) == sorted_relpaths
                and item.get("parent_dir") == parent_dir):
            return item.get("commit"), item.get("commit_times")
    return None, {}

def _find_value(ret_dict, key, path=None):
    '''
    PRIVATE METHOD
    Traverses a dictionary of dictionaries/lists to find key and return the
    value stored.
    TODO: this method doesn't work very well and isn't very useful in its
    current state. Its purpose is to simplify parsing the JSON output so you
    can just pass the key you want to find and have it return the value.

    ret_dict : dict<str, obj>
        The dictionary to search through. Typically this will be a dict
        returned from solr.
    key : str
        The key (str) to find in the dictionary

    Return: list<dict<str, obj>>::

        [{path: path, value: value}]
    '''
    if path is None:
        path = key
    else:
        path = "{0}:{1}".format(path, key)
    ret = []
    for ikey, val in six.iteritems(ret_dict):
        if ikey == key:
            ret.append({path: val})
        if isinstance(val, list):
            for item in val:
                if isinstance(item, dict):
                    ret = ret + _find_value(item, key, path)
        if isinstance(val, dict):
            ret = ret + _find_value(val, key, path)
    return ret

def cpe_subset(cls, source, target):
    """
    Compares two WFNs and returns True if the set-theoretic relation
    between the names is (non-proper) SUBSET.

    :param CPE2_3_WFN source: first WFN CPE Name
    :param CPE2_3_WFN target: second WFN CPE Name
    :returns: True if the set relation between source and target
        is SUBSET, otherwise False.
    :rtype: boolean
    """
    # If any pairwise comparison returned something other than SUBSET
    # or EQUAL, then SUBSET is False.
    for att, result in CPESet2_3.compare_wfns(source, target):
        isSubset = result == CPESet2_3.LOGICAL_VALUE_SUBSET
        isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL
        if (not isSubset) and (not isEqual):
            return False
    return True

def merge_dictionaries(base_dict, extra_dict):
    """
    Merge two dictionaries. If both have the same key, the value from
    extra_dict is taken.

    :param base_dict: first dictionary
    :type base_dict: dict
    :param extra_dict: second dictionary
    :type extra_dict: dict
    :return: a merge of the two dictionaries
    :rtype: dict
    """
    new_dict = base_dict.copy()
    new_dict.update(extra_dict)
    return new_dict

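# A quick usage check (hypothetical values): dict.update means the value
# from extra_dict wins on a key collision, and base_dict is left untouched
# because it was copied first.
base = {'host': 'localhost', 'port': 5432}
extra = {'port': 6432, 'user': 'scott'}
print(merge_dictionaries(base, extra))  # {'host': 'localhost', 'port': 6432, 'user': 'scott'}
print(base)                             # {'host': 'localhost', 'port': 5432}
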
def visit(self, node):
    '''The main visit function. Visits the passed-in node and calls
    finalize.
    '''
    for token in self.itervisit(node):
        pass
    result = self.finalize()
    if result is not self:
        return result

def get_cli(cls) -> click.Group:
    """Get a :mod:`click` main function with added BEL namespace commands."""
    main = super().get_cli()

    if cls.is_namespace:
        @main.group()
        def belns():
            """Manage BEL namespace."""

        cls._cli_add_to_bel_namespace(belns)
        cls._cli_add_clear_bel_namespace(belns)
        cls._cli_add_write_bel_namespace(belns)

    if cls.is_annotation:
        @main.group()
        def belanno():
            """Manage BEL annotation."""

        cls._cli_add_write_bel_annotation(belanno)

    return main

def arc_center(points):
    """
    Given three points on an arc find:
    center, radius, normal, and angle.

    This uses the fact that the intersection of the perpendicular
    bisectors of the segments between the control points is the
    center of the arc.

    Parameters
    ---------
    points : (3, dimension) float
        Points in space, where dimension is either 2 or 3

    Returns
    ---------
    result : dict
        Has keys:
        'center': (d,) float, cartesian center of the arc
        'radius': float, radius of the arc
        'normal': (3,) float, the plane normal.
        'angles': (2,) float, angle of start and end, in radians
        'span' :  float, angle swept by the arc, in radians
    """
    # it's a lot easier to treat 2D as 3D with a zero Z value
    points, is_2D = util.stack_3D(points, return_2D=True)
    # find the two edge vectors of the triangle
    edge_direction = np.diff(points, axis=0)
    edge_midpoints = (edge_direction * 0.5) + points[:2]
    # three points define a plane, so we find its normal vector
    plane_normal = np.cross(*edge_direction[::-1])
    plane_normal /= np.linalg.norm(plane_normal)
    # unit vector along edges
    vector_edge = (edge_direction /
                   np.linalg.norm(edge_direction, axis=1).reshape((-1, 1)))
    # perpendicular vector to each segment
    vector_perp = np.cross(vector_edge, plane_normal)
    vector_perp /= np.linalg.norm(vector_perp, axis=1).reshape((-1, 1))
    # run the line-line intersection to find the point
    intersects, center = line_line(origins=edge_midpoints,
                                   directions=vector_perp,
                                   plane_normal=plane_normal)
    if not intersects:
        raise ValueError('Segments do not intersect!')
    # radius is euclidean distance
    radius = ((points[0] - center) ** 2).sum() ** .5
    # vectors from points on arc to center point
    vector = points - center
    vector /= np.linalg.norm(vector, axis=1).reshape((-1, 1))
    angle = np.arccos(np.clip(np.dot(*vector[[0, 2]]), -1.0, 1.0))
    large_arc = (abs(angle) > tol.zero and
                 np.dot(*edge_direction) < 0.0)
    if large_arc:
        angle = (np.pi * 2) - angle
    angles = np.arctan2(*vector[:, :2].T[::-1]) + np.pi * 2
    angles_sorted = np.sort(angles[[0, 2]])
    reverse = angles_sorted[0] < angles[1] < angles_sorted[1]
    angles_sorted = angles_sorted[::(1 - int(not reverse) * 2)]
    result = {'center': center[:(3 - is_2D)],
              'radius': radius,
              'normal': plane_normal,
              'span': angle,
              'angles': angles_sorted}
    return result

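# To see the perpendicular-bisector idea from the comment above in isolation,
# here is a self-contained 2D sketch (hypothetical points; it does not use
# the trimesh helpers util.stack_3D / line_line that arc_center relies on).
import numpy as np

def circumcenter_2d(p0, p1, p2):
    # Equidistance from consecutive point pairs gives two linear equations:
    # 2*(p1 - p0) . x = |p1|^2 - |p0|^2, and likewise for (p2, p1).
    a = 2.0 * np.array([p1 - p0, p2 - p1])
    b = np.array([p1 @ p1 - p0 @ p0, p2 @ p2 - p1 @ p1])
    return np.linalg.solve(a, b)

pts = [np.array(v, dtype=float) for v in [(1, 0), (0, 1), (-1, 0)]]
center = circumcenter_2d(*pts)
radius = np.linalg.norm(pts[0] - center)
print(center, radius)  # ~[0. 0.] 1.0: the unit circle through the three points
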
def sum_data(filter_data, is_bw):
    """ calculate sum"""
    for index in range(len(filter_data) - 1):
        if filter_data[index][0] > filter_data[index + 1][0]:
            max_index = index + 1
            break
    else:
        max_index = len(filter_data)
    print("max_index: ", max_index + 1)
    num_jobs = int(round(len(filter_data) * 1.0 / max_index))
    print("num_jobs: ", num_jobs)
    dict_time = Counter(filter_data[:, 0])
    list_sum = []
    for time_index in range(1, max_index + 1):
        if dict_time.get(time_index * 1000, 0) != num_jobs:
            print("[WARNING] Time %d, number of data %d != num_jobs %d" % (
                time_index * 1000, dict_time.get(time_index * 1000, 0), num_jobs
            ))
            continue
        filter_mask = (filter_data[:, 0] == time_index * 1000)
        sum_rst = np.sum(filter_data[filter_mask][:, 1])
        if is_bw:
            sum_rst = sum_rst / 1024
        list_sum.append([time_index, sum_rst])
    return np.array(list_sum)

def init(self):
    '''
    Initialize the device.
    Parameters of visa.ResourceManager().open_resource()
    '''
    super(Visa, self).init()
    backend = self._init.get('backend', '')  # Empty string means std. backend (NI VISA)
    rm = visa.ResourceManager(backend)
    try:
        logger.info('BASIL VISA TL with %s backend found the following devices: %s',
                    backend, ", ".join(rm.list_resources()))
    except NotImplementedError:  # some backends do not always implement the list_resources function
        logger.info('BASIL VISA TL with %s backend', backend)
    self._resource = rm.open_resource(**{key: value for key, value in self._init.items()
                                         if key not in ("backend",)})

def search_process(process, pattern,
                   minAddr=None, maxAddr=None,
                   bufferPages=None, overlapping=False):
    """
    Search for the given pattern within the process memory.

    @type  process: L{Process}
    @param process: Process to search.

    @type  pattern: L{Pattern}
    @param pattern: Pattern to search for.
        It must be an instance of a subclass of L{Pattern}.

        The following L{Pattern} subclasses are provided by WinAppDbg:
         - L{BytePattern}
         - L{TextPattern}
         - L{RegExpPattern}
         - L{HexPattern}

        You can also write your own subclass of L{Pattern}
        for customized searches.

    @type  minAddr: int
    @param minAddr: (Optional) Start the search at this memory address.

    @type  maxAddr: int
    @param maxAddr: (Optional) Stop the search at this memory address.

    @type  bufferPages: int
    @param bufferPages: (Optional) Number of memory pages to buffer when
        performing the search. Valid values are:
         - C{0} or C{None}: Automatically determine the required buffer size.
           May not give complete results for regular expressions that match
           variable sized strings.
         - C{> 0}: Set the buffer size, in memory pages.
         - C{< 0}: Disable buffering entirely. This may give you a little
           speed gain at the cost of an increased memory usage. If the
           target process has very large contiguous memory regions it may
           actually be slower or even fail. It's also the only way to
           guarantee complete results for regular expressions that match
           variable sized strings.

    @type  overlapping: bool
    @param overlapping: C{True} to allow overlapping results, C{False}
        otherwise.

        Overlapping results yield the maximum possible number of results.

        For example, if searching for "AAAA" within "AAAAAAAA" at address
        C{0x10000}, when overlapping is turned off the following matches
        are yielded::

            (0x10000, 4, "AAAA")
            (0x10004, 4, "AAAA")

        If overlapping is turned on, the following matches are yielded::

            (0x10000, 4, "AAAA")
            (0x10001, 4, "AAAA")
            (0x10002, 4, "AAAA")
            (0x10003, 4, "AAAA")
            (0x10004, 4, "AAAA")

        As you can see, the middle results are overlapping the last two.

    @rtype:  iterator of tuple( int, int, str )
    @return: An iterator of tuples. Each tuple contains the following:
         - The memory address where the pattern was found.
         - The size of the data that matches the pattern.
         - The data that matches the pattern.

    @raise WindowsError: An error occurred when querying or reading the
        process memory.
    """

    # Do some namespace lookups of symbols we'll be using frequently.
    MEM_COMMIT = win32.MEM_COMMIT
    PAGE_GUARD = win32.PAGE_GUARD
    page = MemoryAddresses.pageSize
    read = pattern.read
    find = pattern.find

    # Calculate the address range.
    if minAddr is None:
        minAddr = 0
    if maxAddr is None:
        maxAddr = win32.LPVOID(-1).value  # XXX HACK

    # Calculate the buffer size from the number of pages.
    if bufferPages is None:
        try:
            size = MemoryAddresses.\
                align_address_to_page_end(len(pattern)) + page
        except NotImplementedError:
            size = None
    elif bufferPages > 0:
        size = page * (bufferPages + 1)
    else:
        size = None

    # Get the memory map of the process.
    memory_map = process.iter_memory_map(minAddr, maxAddr)

    # Perform search with buffering enabled.
    if size:

        # Loop through all memory blocks containing data.
        buffer = ""     # buffer to hold the memory data
        prev_addr = 0   # previous memory block address
        last = 0        # position of the last match
        delta = 0       # delta of last read address and start of buffer
        for mbi in memory_map:

            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue

            # Get the address and size of this block.
            address = mbi.BaseAddress      # current address to search on
            block_size = mbi.RegionSize    # total size of the block
            if address >= maxAddr:
                break
            end = address + block_size     # end address of the block

            # If the block is contiguous to the previous block,
            # coalesce the new data in the buffer.
            if delta and address == prev_addr:
                buffer += read(process, address, page)

            # If not, clear the buffer and read new data.
            else:
                buffer = read(process, address, min(size, block_size))
                last = 0
                delta = 0

            # Search for the pattern in this block.
            while 1:

                # Yield each match of the pattern in the buffer.
                pos, length = find(buffer, last)
                while pos >= last:
                    match_addr = address + pos - delta
                    if minAddr <= match_addr < maxAddr:
                        result = pattern.found(
                            match_addr, length,
                            buffer[pos:pos + length])
                        if result is not None:
                            yield result
                    if overlapping:
                        last = pos + 1
                    else:
                        last = pos + length
                    pos, length = find(buffer, last)

                # Advance to the next page.
                address = address + page
                block_size = block_size - page
                prev_addr = address

                # Fix the position of the last match.
                last = last - page
                if last < 0:
                    last = 0

                # Remove the first page in the buffer.
                buffer = buffer[page:]
                delta = page

                # If we haven't reached the end of the block yet,
                # read the next page in the block and keep searching.
                if address < end:
                    buffer = buffer + read(process, address, page)

                # Otherwise, we're done searching this block.
                else:
                    break

    # Perform search with buffering disabled.
    else:

        # Loop through all memory blocks containing data.
        for mbi in memory_map:

            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue

            # Get the address and size of this block.
            address = mbi.BaseAddress
            block_size = mbi.RegionSize
            if address >= maxAddr:
                break

            # Read the whole memory region.
            buffer = process.read(address, block_size)

            # Search for the pattern in this region.
            pos, length = find(buffer)
            last = 0
            while pos >= last:
                match_addr = address + pos
                if minAddr <= match_addr < maxAddr:
                    result = pattern.found(
                        match_addr, length,
                        buffer[pos:pos + length])
                    if result is not None:
                        yield result
                if overlapping:
                    last = pos + 1
                else:
                    last = pos + length
                pos, length = find(buffer, last)

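# A minimal, self-contained illustration of the `overlapping` flag documented
# above, using str.find on an in-memory string instead of process memory
# (hypothetical data; no WinAppDbg objects involved).
def find_all(buffer, pattern, overlapping):
    last = 0
    pos = buffer.find(pattern, last)
    while pos >= last:
        yield pos
        last = pos + 1 if overlapping else pos + len(pattern)
        pos = buffer.find(pattern, last)

print(list(find_all("AAAAAAAA", "AAAA", overlapping=False)))  # [0, 4]
print(list(find_all("AAAAAAAA", "AAAA", overlapping=True)))   # [0, 1, 2, 3, 4]
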
def aggregate(self, rankings, epsilon, max_iters):
    """
    Description:
        Minorization-Maximization algorithm which returns an estimate of
        the ground-truth parameters, gamma, for the given data.
    Parameters:
        rankings:  set of rankings to aggregate
        epsilon:   convergence condition value, set to None for iteration only
        max_iters: maximum number of iterations of MM algorithm
    """
    # compute the matrix w, the numbers of pairwise wins:
    w = np.zeros((self.m, self.m))
    for ranking in rankings:
        localw = np.zeros((self.m, self.m))
        for ind1, alt1 in enumerate(self.alts):
            for ind2, alt2 in enumerate(self.alts):
                if ind1 == ind2:
                    continue
                alt1_rank = util.get_index_nested(ranking, alt1)
                alt2_rank = util.get_index_nested(ranking, alt2)
                if alt1_rank < alt2_rank:  # alt 1 is ranked higher
                    localw[ind1][ind2] = 1
        w += localw
    W = w.sum(axis=1)

    # gamma_t is the value of gamma at time = t
    # gamma_t1 is the value of gamma at time t = t+1 (the next iteration)
    # initial arbitrary value for gamma:
    gamma_t = np.ones(self.m) / self.m
    gamma_t1 = np.empty(self.m)

    for f in range(max_iters):
        for i in range(self.m):
            s = 0  # sum of updating function
            for j in range(self.m):
                if j != i:
                    s += (w[j][i] + w[i][j]) / (gamma_t[i] + gamma_t[j])
            gamma_t1[i] = W[i] / s
        gamma_t1 /= np.sum(gamma_t1)

        if epsilon is not None and np.all(np.absolute(gamma_t1 - gamma_t) < epsilon):
            alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
            self.create_rank_dicts(alt_scores)
            return gamma_t1  # convergence reached before max_iters

        # update gamma_t for the next iteration; copy, so the next pass does
        # not mutate gamma_t through the shared array and break the test above
        gamma_t = gamma_t1.copy()

    alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
    self.create_rank_dicts(alt_scores)
    return gamma_t1

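# A self-contained numpy sketch of the core minorization-maximization update
# used above, run on a hypothetical precomputed pairwise win matrix (so it
# does not need the class's rankings, alts, or util helpers). w[i][j] counts
# how often alternative i beat alternative j.
import numpy as np

w = np.array([[0., 6., 4.],
              [2., 0., 5.],
              [1., 3., 0.]])
m = w.shape[0]
W = w.sum(axis=1)          # total wins per alternative
gamma_t = np.ones(m) / m   # arbitrary initial gamma
gamma_t1 = np.empty(m)

for _ in range(100):
    for i in range(m):
        s = sum((w[j][i] + w[i][j]) / (gamma_t[i] + gamma_t[j])
                for j in range(m) if j != i)
        gamma_t1[i] = W[i] / s
    gamma_t1 /= np.sum(gamma_t1)
    if np.all(np.absolute(gamma_t1 - gamma_t) < 1e-9):
        break
    gamma_t = gamma_t1.copy()

print(gamma_t1)  # estimated strengths, summing to 1
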
import re
from urllib.parse import urlparse  # on Python 2: from urlparse import urlparse


def parse_connection_string_psycopg2(connection_string):
    """
    parses a psycopg2-consumable connection string
    :param connection_string:
    :return: dictionary with connection string parts
    """
    conn_prepared = {}
    conn_parsed = urlparse(connection_string)
    if not conn_parsed.hostname:
        _re_dbstr = re.compile(r'\bhost=(?P<host>[0-9a-zA-Z_.!@#$%^&*()~]+)|'
                               r'dbname=(?P<dbname>[0-9a-zA-Z_.!@#$%^&*()~]+)|'
                               r'port=(?P<port>[0-9a-zA-Z_.!@#$%^&*()~]+)|'
                               r'user=(?P<user>[0-9a-zA-Z_.!@#$%^&*()~]+)|'
                               r'password=(?P<password>[0-9a-zA-Z_.!@#$%^&*()~]+)\b',
                               re.IGNORECASE)
        for match in _re_dbstr.finditer(connection_string):
            match_dict = match.groupdict()
            if match_dict['host']:
                conn_prepared['host'] = match_dict['host']
            if match_dict['port']:
                conn_prepared['port'] = match_dict['port']
            if match_dict['dbname']:
                conn_prepared['dbname'] = match_dict['dbname']
            if match_dict['user']:
                conn_prepared['user'] = match_dict['user']
            if match_dict['password']:
                conn_prepared['password'] = match_dict['password']
    else:
        conn_prepared = {
            'host': conn_parsed.hostname,
            'port': conn_parsed.port,
            # drop the leading '/' the URL path carries before the db name
            'dbname': conn_parsed.path.lstrip('/'),
            'user': conn_parsed.username,
            'password': conn_parsed.password
        }
    return conn_prepared

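# A quick check (hypothetical credentials) showing that both accepted formats
# yield the same connection parts; note urlparse returns the port as an int,
# while the key=value form keeps it as a string.
print(parse_connection_string_psycopg2(
    'postgresql://scott:tiger@localhost:5432/testdb'))
print(parse_connection_string_psycopg2(
    'host=localhost port=5432 dbname=testdb user=scott password=tiger'))
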
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context

    :returns: IpAccessControlListContext for this IpAccessControlListInstance
    :rtype: twilio.rest.trunking.v1.trunk.ip_access_control_list.IpAccessControlListContext
    """
    if self._context is None:
        self._context = IpAccessControlListContext(
            self._version,
            trunk_sid=self._solution['trunk_sid'],
            sid=self._solution['sid'],
        )
    return self._context

def format_return_text(self, data, function, **kwargs):  # pylint: disable=unused-argument
    '''
    Print out YAML using the block mode
    '''
    # emulate the yaml_out output formatter. It relies on a global __opts__ object which
    # we can't obviously pass in
    try:
        try:
            outputter = data[next(iter(data))].get('out')
        except (StopIteration, AttributeError):
            outputter = None
        return salt.output.string_format(
            {x: y['return'] for x, y in six.iteritems(data)},
            out=outputter,
            opts=__opts__,
        )
    except Exception as exc:
        import pprint
        log.exception(
            'Exception encountered when trying to serialize %s',
            pprint.pformat(data)
        )
        return 'Got an error trying to serialize/clean up the response'

def Get(self, key):
    """Get alert by providing name, ID, or other unique key.

    If the key is not unique and multiple matches are found, only the
    first will be returned.
    """
    for alert in self.alerts:
        if alert.id == key:
            return alert
        elif alert.name == key:
            return alert

def get_option_names(self):
    """returns a list of fully qualified option names.

    returns:
        a list of strings representing the Options in the source Namespace
        list. Each item will be fully qualified with dot delimited
        Namespace names.
    """
    return [x for x in self.option_definitions.keys_breadth_first()
            if isinstance(self.option_definitions[x], Option)]

def open_application(self, remote_url, alias=None, **kwargs):
    """Opens a new application on the given Appium server.

    For the capabilities of the Appium server, Android, and iOS,
    please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md

    | *Option*   | *Man.* | *Description* |
    | remote_url | Yes    | Appium server url |
    | alias      | no     | alias |

    Examples:
    | Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
    | Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
    """
    desired_caps = kwargs
    application = webdriver.Remote(str(remote_url), desired_caps)
    self._debug('Opened application with session id %s' % application.session_id)
    return self._cache.register(application, alias)

def api_returns(return_values):
    """
    Define the return schema of an API.

    'return_values' is a dictionary mapping
    HTTP return code => documentation
    In addition to validating that the status code of the response belongs
    to one of the accepted status codes, it also validates that the returned
    object is JSON (derived from JsonResponse)

    In debug and test modes, failure to validate the fields will result in
    a 400 Bad Request response.
    In production mode, failure to validate will just log a warning, unless
    overwritten by a 'strict' setting.

    For example:

    @api_returns({
        200: 'Operation successful',
        403: 'User does not have permission',
        404: 'Resource not found',
    })
    def add(request, *args, **kwargs):
        if not request.user.is_superuser:
            return JsonResponseForbidden()  # 403

        return HttpResponse()  # 200
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            return_value = func(request, *args, **kwargs)

            if not isinstance(return_value, JsonResponse):
                if settings.DEBUG:
                    return JsonResponseBadRequest('API did not return JSON')
                else:
                    logger.warn('API did not return JSON')

            # list() so we can append below; on Python 3, dict.keys() is a
            # view with no append method
            accepted_return_codes = list(return_values.keys())
            # Never block 500s - these should be handled by other
            # reporting mechanisms
            accepted_return_codes.append(500)

            if return_value.status_code not in accepted_return_codes:
                if settings.DEBUG:
                    return JsonResponseBadRequest(
                        'API returned %d instead of acceptable values %s' %
                        (return_value.status_code, accepted_return_codes)
                    )
                else:
                    logger.warn(
                        'API returned %d instead of acceptable values %s',
                        return_value.status_code,
                        accepted_return_codes,
                    )

            return return_value
        return wrapped_func
    return decorator

def mathTransformToMatrix(mathTransform):
    """ Take a ShallowTransform object and return a 6-tuple. """
    m = MathTransform().compose(mathTransform.offset,
                                mathTransform.scale,
                                mathTransform.rotation)
    return tuple(m)

def set_qs_value(self, qsid, val, success_cb):
    """Push state to QSUSB, retry with backoff."""
    self.loop.create_task(self.async_set_qs_value(qsid, val, success_cb))

def post(self, uri, params={}, data={}):
    '''A generic method to make POST requests on the given URI.'''
    return requests.post(
        urlparse.urljoin(self.BASE_URL, uri),
        params=params, data=json.dumps(data),
        verify=False, auth=self.auth,
        headers={'Content-type': 'application/json', 'Accept': 'text/plain'})

def bootstraps(self, _args):
    """List all the bootstraps available to build with."""
    for bs in Bootstrap.list_bootstraps():
        bs = Bootstrap.get_bootstrap(bs, self.ctx)
        print('{Fore.BLUE}{Style.BRIGHT}{bs.name}{Style.RESET_ALL}'
              .format(bs=bs, Fore=Out_Fore, Style=Out_Style))
        print(' {Fore.GREEN}depends: {bs.recipe_depends}{Fore.RESET}'
              .format(bs=bs, Fore=Out_Fore))

def all_down(self):
    """
    Get the leaf object of this comparison.
    (This is a convenient wrapper for following the down attribute
    as often as you can.)
    :rtype: DiffLevel
    """
    level = self
    while level.down:
        level = level.down
    return level

def patch_ironic_ramdisk(self):
    """Clean the disk before flushing the new image.

    See: https://bugs.launchpad.net/ironic-lib/+bug/1550604
    """
    tmpdir = self.run('mktemp -d')[0].rstrip('\n')
    self.run('cd {tmpdir}; zcat /home/stack/ironic-python-agent.initramfs| cpio -id'.format(tmpdir=tmpdir))
    self.send_file(pkg_data_filename('static', 'ironic-wipefs.patch'),
                   '/tmp/ironic-wipefs.patch')
    self.run('cd {tmpdir}; patch -p0 < /tmp/ironic-wipefs.patch'.format(tmpdir=tmpdir))
    self.run('cd {tmpdir}; find . | cpio --create --format=newc > /home/stack/ironic-python-agent.initramfs'.format(tmpdir=tmpdir))

[ "def", "patch_ironic_ramdisk", "(", "self", ")", ":", "tmpdir", "=", "self", ".", "run", "(", "'mktemp -d'", ")", "[", "0", "]", ".", "rstrip", "(", "'\\n'", ")", "self", ".", "run", "(", "'cd {tmpdir}; zcat /home/stack/ironic-python-agent.initramfs| cpio -id'", ".", "format", "(", "tmpdir", "=", "tmpdir", ")", ")", "self", ".", "send_file", "(", "pkg_data_filename", "(", "'static'", ",", "'ironic-wipefs.patch'", ")", ",", "'/tmp/ironic-wipefs.patch'", ")", "self", ".", "run", "(", "'cd {tmpdir}; patch -p0 < /tmp/ironic-wipefs.patch'", ".", "format", "(", "tmpdir", "=", "tmpdir", ")", ")", "self", ".", "run", "(", "'cd {tmpdir}; find . | cpio --create --format=newc > /home/stack/ironic-python-agent.initramfs'", ".", "format", "(", "tmpdir", "=", "tmpdir", ")", ")" ]
65.4
[ 0.03225806451612903, 0.03571428571428571, 0, 0.047619047619047616, 0.18181818181818182, 0.037037037037037035, 0.02702702702702703, 0.029411764705882353, 0.03296703296703297, 0.022222222222222223 ]
def eval(self, cmd):
    """Evaluate a given command. The command is parsed and the output
    returned as a list of lines (strings).

    Raises a SCOCmdSyntaxError in case the command cannot be parsed.

    Parameters
    ----------
    cmd : string
        Command string

    Returns
    -------
    list(string)
        Command output as list of strings (lines)
    """
    tokens = cmd.upper().split()
    if len(tokens) == 2 and tokens[0] == 'LIST':
        if tokens[1] == 'EXPERIMENTS':
            return self.list_objects(self.sco.experiments_list())
        elif tokens[1] == 'IMAGES':
            return self.list_objects(self.sco.image_groups_list())
        elif tokens[1] == 'MODELS':
            return self.list_objects(self.sco.models_list())
        elif tokens[1] == 'SUBJECTS':
            return self.list_objects(self.sco.subjects_list())
        else:
            raise SCOCmdSyntaxError(cmd, 'unknown type: ' + cmd.split()[1])
    else:
        raise SCOCmdSyntaxError(cmd, 'unknown command')
[ "def", "eval", "(", "self", ",", "cmd", ")", ":", "tokens", "=", "cmd", ".", "upper", "(", ")", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "==", "2", "and", "tokens", "[", "0", "]", "==", "'LIST'", ":", "if", "tokens", "[", "1", "]", "==", "'EXPERIMENTS'", ":", "return", "self", ".", "list_objects", "(", "self", ".", "sco", ".", "experiments_list", "(", ")", ")", "elif", "tokens", "[", "1", "]", "==", "'IMAGES'", ":", "return", "self", ".", "list_objects", "(", "self", ".", "sco", ".", "image_groups_list", "(", ")", ")", "elif", "tokens", "[", "1", "]", "==", "'MODELS'", ":", "return", "self", ".", "list_objects", "(", "self", ".", "sco", ".", "models_list", "(", ")", ")", "elif", "tokens", "[", "1", "]", "==", "'SUBJECTS'", ":", "return", "self", ".", "list_objects", "(", "self", ".", "sco", ".", "subjects_list", "(", ")", ")", "else", ":", "raise", "SCOCmdSyntaxError", "(", "cmd", ",", "'unknown type: '", "+", "cmd", ".", "split", "(", ")", "[", "1", "]", ")", "else", ":", "raise", "SCOCmdSyntaxError", "(", "cmd", ",", "'unknown command'", ")" ]
36.466667
[ 0.05, 0.0273972602739726, 0.06521739130434782, 0, 0.027777777777777776, 0, 0.1111111111111111, 0.1111111111111111, 0.14285714285714285, 0.07692307692307693, 0, 0.13333333333333333, 0.13333333333333333, 0.1, 0.05660377358490566, 0.18181818181818182, 0.05555555555555555, 0.038461538461538464, 0.047619047619047616, 0.028985507246376812, 0.05128205128205128, 0.02857142857142857, 0.05128205128205128, 0.03125, 0.04878048780487805, 0.030303030303030304, 0.11764705882352941, 0.02531645569620253, 0.15384615384615385, 0.03389830508474576 ]
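The token dispatch above, pared down to a standalone function (the SCO client calls replaced by plain strings) to show how the two-token 'LIST <type>' grammar is handled.

def parse_list_command(cmd):
    # same shape as eval() above, minus the SCO client calls
    tokens = cmd.upper().split()
    if len(tokens) == 2 and tokens[0] == 'LIST':
        if tokens[1] in ('EXPERIMENTS', 'IMAGES', 'MODELS', 'SUBJECTS'):
            return tokens[1]
        raise ValueError('unknown type: ' + cmd.split()[1])
    raise ValueError('unknown command')

assert parse_list_command('list models') == 'MODELS'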
def rvs(self, size=1, param=None):
    """Gives a set of random values drawn from this distribution.

    Parameters
    ----------
    size : {1, int}
        The number of values to generate; default is 1.
    param : {None, string}
        If provided, will just return values for the given parameter.
        Otherwise, returns random values for each parameter.

    Returns
    -------
    structured array
        The random values in a numpy structured array. If a param was
        specified, the array will only have an element corresponding to
        the given parameter. Otherwise, the array will have an element
        for each parameter in self's params.
    """

    if param is not None:
        dtype = [(param, float)]
    else:
        dtype = [(p, float) for p in self.params]
    arr = numpy.zeros(size, dtype=dtype)
    for (p,_) in dtype:
        log_high = numpy.log10(self._bounds[p][0])
        log_low = numpy.log10(self._bounds[p][1])
        arr[p] = 10.0**(numpy.random.uniform(log_low, log_high, size=size))
    return arr
[ "def", "rvs", "(", "self", ",", "size", "=", "1", ",", "param", "=", "None", ")", ":", "if", "param", "is", "not", "None", ":", "dtype", "=", "[", "(", "param", ",", "float", ")", "]", "else", ":", "dtype", "=", "[", "(", "p", ",", "float", ")", "for", "p", "in", "self", ".", "params", "]", "arr", "=", "numpy", ".", "zeros", "(", "size", ",", "dtype", "=", "dtype", ")", "for", "(", "p", ",", "_", ")", "in", "dtype", ":", "log_high", "=", "numpy", ".", "log10", "(", "self", ".", "_bounds", "[", "p", "]", "[", "0", "]", ")", "log_low", "=", "numpy", ".", "log10", "(", "self", ".", "_bounds", "[", "p", "]", "[", "1", "]", ")", "arr", "[", "p", "]", "=", "10.0", "**", "(", "numpy", ".", "random", ".", "uniform", "(", "log_low", ",", "log_high", ",", "size", "=", "size", ")", ")", "return", "arr" ]
37.633333
[ 0.029411764705882353, 0.028985507246376812, 0, 0.1111111111111111, 0.1111111111111111, 0.13043478260869565, 0.05084745762711865, 0.1, 0.0273972602739726, 0.03125, 0, 0.13333333333333333, 0.13333333333333333, 0.08333333333333333, 0.0273972602739726, 0.02531645569620253, 0.02531645569620253, 0.05128205128205128, 0.18181818181818182, 0, 0.06896551724137931, 0.05555555555555555, 0.15384615384615385, 0.03773584905660377, 0.045454545454545456, 0.1111111111111111, 0.037037037037037035, 0.03773584905660377, 0.02531645569620253, 0.1111111111111111 ]
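The sampling step above is a plain log-uniform draw; a minimal sketch with made-up bounds:

import numpy

low, high = 1e-3, 1e2   # hypothetical bounds for one parameter
# draw uniformly in log10 space, then exponentiate, so every decade
# between the bounds is equally likely -- exactly what rvs() does
samples = 10.0 ** numpy.random.uniform(numpy.log10(low),
                                       numpy.log10(high), size=5)
assert ((samples >= low) & (samples <= high)).all()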
def mavlink_packet(self, m):
    '''handle an incoming mavlink packet'''
    mtype = m.get_type()
    if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
        if self.wp_op is None:
            self.console.error("No waypoint load started")
        else:
            self.wploader.clear()
            self.wploader.expected_count = m.count
            self.console.writeln("Requesting %u waypoints t=%s now=%s" % (m.count,
                                                                          time.asctime(time.localtime(m._timestamp)),
                                                                          time.asctime()))
            self.master.waypoint_request_send(0)

    elif mtype in ['WAYPOINT', 'MISSION_ITEM'] and self.wp_op != None:
        if m.seq > self.wploader.count():
            self.console.writeln("Unexpected waypoint number %u - expected %u" % (m.seq, self.wploader.count()))
        elif m.seq < self.wploader.count():
            # a duplicate
            pass
        else:
            self.wploader.add(m)
        if m.seq+1 < self.wploader.expected_count:
            self.master.waypoint_request_send(m.seq+1)
        else:
            if self.wp_op == 'list':
                for i in range(self.wploader.count()):
                    w = self.wploader.wp(i)
                    print("%u %u %.10f %.10f %f p1=%.1f p2=%.1f p3=%.1f p4=%.1f cur=%u auto=%u" % (
                        w.command, w.frame, w.x, w.y, w.z,
                        w.param1, w.param2, w.param3, w.param4,
                        w.current, w.autocontinue))
                if self.logdir != None:
                    waytxt = os.path.join(self.logdir, 'way.txt')
                    self.save_waypoints(waytxt)
                    print("Saved waypoints to %s" % waytxt)
            elif self.wp_op == "save":
                self.save_waypoints(self.wp_save_filename)
            self.wp_op = None

    elif mtype in ["WAYPOINT_REQUEST", "MISSION_REQUEST"]:
        self.process_waypoint_request(m, self.master)

    elif mtype in ["WAYPOINT_CURRENT", "MISSION_CURRENT"]:
        if m.seq != self.last_waypoint:
            self.last_waypoint = m.seq
            if self.settings.wpupdates:
                self.say("waypoint %u" % m.seq,priority='message')
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "mtype", "=", "m", ".", "get_type", "(", ")", "if", "mtype", "in", "[", "'WAYPOINT_COUNT'", ",", "'MISSION_COUNT'", "]", ":", "if", "self", ".", "wp_op", "is", "None", ":", "self", ".", "console", ".", "error", "(", "\"No waypoint load started\"", ")", "else", ":", "self", ".", "wploader", ".", "clear", "(", ")", "self", ".", "wploader", ".", "expected_count", "=", "m", ".", "count", "self", ".", "console", ".", "writeln", "(", "\"Requesting %u waypoints t=%s now=%s\"", "%", "(", "m", ".", "count", ",", "time", ".", "asctime", "(", "time", ".", "localtime", "(", "m", ".", "_timestamp", ")", ")", ",", "time", ".", "asctime", "(", ")", ")", ")", "self", ".", "master", ".", "waypoint_request_send", "(", "0", ")", "elif", "mtype", "in", "[", "'WAYPOINT'", ",", "'MISSION_ITEM'", "]", "and", "self", ".", "wp_op", "!=", "None", ":", "if", "m", ".", "seq", ">", "self", ".", "wploader", ".", "count", "(", ")", ":", "self", ".", "console", ".", "writeln", "(", "\"Unexpected waypoint number %u - expected %u\"", "%", "(", "m", ".", "seq", ",", "self", ".", "wploader", ".", "count", "(", ")", ")", ")", "elif", "m", ".", "seq", "<", "self", ".", "wploader", ".", "count", "(", ")", ":", "# a duplicate", "pass", "else", ":", "self", ".", "wploader", ".", "add", "(", "m", ")", "if", "m", ".", "seq", "+", "1", "<", "self", ".", "wploader", ".", "expected_count", ":", "self", ".", "master", ".", "waypoint_request_send", "(", "m", ".", "seq", "+", "1", ")", "else", ":", "if", "self", ".", "wp_op", "==", "'list'", ":", "for", "i", "in", "range", "(", "self", ".", "wploader", ".", "count", "(", ")", ")", ":", "w", "=", "self", ".", "wploader", ".", "wp", "(", "i", ")", "print", "(", "\"%u %u %.10f %.10f %f p1=%.1f p2=%.1f p3=%.1f p4=%.1f cur=%u auto=%u\"", "%", "(", "w", ".", "command", ",", "w", ".", "frame", ",", "w", ".", "x", ",", "w", ".", "y", ",", "w", ".", "z", ",", "w", ".", "param1", ",", "w", ".", "param2", ",", "w", ".", "param3", ",", "w", ".", "param4", ",", "w", ".", "current", ",", "w", ".", "autocontinue", ")", ")", "if", "self", ".", "logdir", "!=", "None", ":", "waytxt", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "'way.txt'", ")", "self", ".", "save_waypoints", "(", "waytxt", ")", "print", "(", "\"Saved waypoints to %s\"", "%", "waytxt", ")", "elif", "self", ".", "wp_op", "==", "\"save\"", ":", "self", ".", "save_waypoints", "(", "self", ".", "wp_save_filename", ")", "self", ".", "wp_op", "=", "None", "elif", "mtype", "in", "[", "\"WAYPOINT_REQUEST\"", ",", "\"MISSION_REQUEST\"", "]", ":", "self", ".", "process_waypoint_request", "(", "m", ",", "self", ".", "master", ")", "elif", "mtype", "in", "[", "\"WAYPOINT_CURRENT\"", ",", "\"MISSION_CURRENT\"", "]", ":", "if", "m", ".", "seq", "!=", "self", ".", "last_waypoint", ":", "self", ".", "last_waypoint", "=", "m", ".", "seq", "if", "self", ".", "settings", ".", "wpupdates", ":", "self", ".", "say", "(", "\"waypoint %u\"", "%", "m", ".", "seq", ",", "priority", "=", "'message'", ")" ]
50.166667
[ 0.03571428571428571, 0.0425531914893617, 0.07142857142857142, 0.05454545454545454, 0.058823529411764705, 0.03225806451612903, 0.11764705882352941, 0.05405405405405406, 0.037037037037037035, 0.046511627906976744, 0.03225806451612903, 0.05154639175257732, 0.038461538461538464, 0, 0.04054054054054054, 0.044444444444444446, 0.02586206896551724, 0.0425531914893617, 0.06896551724137931, 0.1, 0.11764705882352941, 0.05555555555555555, 0.037037037037037035, 0.034482758620689655, 0.11764705882352941, 0.05, 0.034482758620689655, 0.0425531914893617, 0.038834951456310676, 0.03225806451612903, 0.029850746268656716, 0.05454545454545454, 0.06976744186046512, 0.028985507246376812, 0.0392156862745098, 0.031746031746031744, 0.047619047619047616, 0.03225806451612903, 0.06060606060606061, 0, 0.03225806451612903, 0.03508771929824561, 0, 0.03225806451612903, 0.046511627906976744, 0.047619047619047616, 0.046511627906976744, 0.04285714285714286 ]
def get_access_token(self, code):
    """ Gets the access token for the app given the code

        Parameters:
            - code - the response code
    """

    payload = {'redirect_uri': self.redirect_uri,
               'code': code,
               'grant_type': 'authorization_code'}

    headers = self._make_authorization_headers()

    response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
                             headers=headers, verify=LOGIN_VERIFY_SSL_CERT)
    if response.status_code != 200:
        raise MercedesMeAuthError(response.reason)
    token_info = response.json()
    token_info = self._add_custom_values_to_token_info(token_info)
    self._save_token_info(token_info)
    return token_info
[ "def", "get_access_token", "(", "self", ",", "code", ")", ":", "payload", "=", "{", "'redirect_uri'", ":", "self", ".", "redirect_uri", ",", "'code'", ":", "code", ",", "'grant_type'", ":", "'authorization_code'", "}", "headers", "=", "self", ".", "_make_authorization_headers", "(", ")", "response", "=", "requests", ".", "post", "(", "self", ".", "OAUTH_TOKEN_URL", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "verify", "=", "LOGIN_VERIFY_SSL_CERT", ")", "if", "response", ".", "status_code", "is", "not", "200", ":", "raise", "MercedesMeAuthError", "(", "response", ".", "reason", ")", "token_info", "=", "response", ".", "json", "(", ")", "token_info", "=", "self", ".", "_add_custom_values_to_token_info", "(", "token_info", ")", "self", ".", "_save_token_info", "(", "token_info", ")", "return", "token_info" ]
35.952381
[ 0.030303030303030304, 0.03333333333333333, 0, 0.08695652173913043, 0.047619047619047616, 0.18181818181818182, 0, 0.05660377358490566, 0.09375, 0.07407407407407407, 0, 0.038461538461538464, 0, 0.04411764705882353, 0.08620689655172414, 0.046511627906976744, 0.037037037037037035, 0.05555555555555555, 0.02857142857142857, 0.04878048780487805, 0.08 ]
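The same authorization-code exchange as a standalone sketch; the token URL and error type are placeholders, and the status check compares by value rather than with the identity test used in the record.

import requests

def exchange_code(token_url, redirect_uri, code, auth=None):
    # minimal OAuth2 authorization_code grant, as in get_access_token
    payload = {'redirect_uri': redirect_uri,
               'code': code,
               'grant_type': 'authorization_code'}
    response = requests.post(token_url, data=payload, auth=auth)
    if response.status_code != 200:   # `!=`, not `is not`
        raise RuntimeError(response.reason)
    return response.json()            # token_info dict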
def __vector_to_string(self, vector):
    """ Returns string representation of vector. """
    return numpy.array_str(numpy.round(unitvec(vector), decimals=3))
[ "def", "__vector_to_string", "(", "self", ",", "vector", ")", ":", "return", "numpy", ".", "array_str", "(", "numpy", ".", "round", "(", "unitvec", "(", "vector", ")", ",", "decimals", "=", "3", ")", ")" ]
55
[ 0.02702702702702703, 0.03571428571428571, 0.027777777777777776 ]
def onChange(self, min_changed_pixels=None, handler=None):
    """ Registers an event to call ``handler`` when at least
    ``min_changed_pixels`` change in this region.

    (Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels)

    The ``handler`` function should take one parameter, an ObserveEvent
    object (see below). This event is ignored in the future unless the
    handler calls the repeat() method on the provided ObserveEvent object.

    Returns the event's ID as a string.
    """
    if isinstance(min_changed_pixels, int) and (callable(handler) or handler is None):
        return self._observer.register_event(
            "CHANGE",
            pattern=(min_changed_pixels, self.getBitmap()),
            handler=handler)
    elif (callable(min_changed_pixels) or min_changed_pixels is None) and (callable(handler) or handler is None):
        handler = min_changed_pixels or handler
        return self._observer.register_event(
            "CHANGE",
            pattern=(Settings.ObserveMinChangedPixels, self.getBitmap()),
            handler=handler)
    else:
        raise ValueError("Unsupported arguments for onChange method")
[ "def", "onChange", "(", "self", ",", "min_changed_pixels", "=", "None", ",", "handler", "=", "None", ")", ":", "if", "isinstance", "(", "min_changed_pixels", ",", "int", ")", "and", "(", "callable", "(", "handler", ")", "or", "handler", "is", "None", ")", ":", "return", "self", ".", "_observer", ".", "register_event", "(", "\"CHANGE\"", ",", "pattern", "=", "(", "min_changed_pixels", ",", "self", ".", "getBitmap", "(", ")", ")", ",", "handler", "=", "handler", ")", "elif", "(", "callable", "(", "min_changed_pixels", ")", "or", "min_changed_pixels", "is", "None", ")", "and", "(", "callable", "(", "handler", ")", "or", "handler", "is", "None", ")", ":", "handler", "=", "min_changed_pixels", "or", "handler", "return", "self", ".", "_observer", ".", "register_event", "(", "\"CHANGE\"", ",", "pattern", "=", "(", "Settings", ".", "ObserveMinChangedPixels", ",", "self", ".", "getBitmap", "(", ")", ")", ",", "handler", "=", "handler", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported arguments for onChange method\"", ")" ]
49.4
[ 0.017241379310344827, 0.034482758620689655, 0.06666666666666667, 0, 0.03614457831325301, 0, 0.04878048780487805, 0.037037037037037035, 0.03125, 0, 0.046511627906976744, 0.18181818181818182, 0.03333333333333333, 0.061224489795918366, 0.08, 0.047619047619047616, 0.125, 0.02564102564102564, 0.0392156862745098, 0.061224489795918366, 0.08, 0.03896103896103896, 0.125, 0.15384615384615385, 0.0273972602739726 ]
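A hypothetical handler for the event registered above; per the docstring, calling repeat() keeps the observer alive past the first hit (region is assumed to be an existing Region instance).

def on_region_change(event):
    # `event` is the ObserveEvent passed by the observer
    print("region changed")
    event.repeat()  # without this the event fires only once

# region.onChange(50, on_region_change)  # fire on >= 50 changed pixels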
def get_user_agent(name="ontobio", version=ontobio_version, modules=None, caller_name=None):
    """
    Create a User-Agent string
    """

    user_agent_array = ["{}/{}".format(name, version)]
    if modules:
        module_info_array = []
        for m in modules:
            mod_name = m.__name__
            mod_version = None
            if hasattr(m, 'get_version'):
                mod_version = m.get_version()
            else:
                mod_version = m.__version__
            module_info_array.append("{}/{}".format(mod_name, mod_version))

        if caller_name:
            module_info_array.append(caller_name)
        user_agent_array.append("({})".format('; '.join(module_info_array)))
    else:
        if caller_name:
            user_agent_array.append("({})".format(caller_name))

    return ' '.join(user_agent_array)
[ "def", "get_user_agent", "(", "name", "=", "\"ontobio\"", ",", "version", "=", "ontobio_version", ",", "modules", "=", "None", ",", "caller_name", "=", "None", ")", ":", "user_agent_array", "=", "[", "\"{}/{}\"", ".", "format", "(", "name", ",", "version", ")", "]", "if", "modules", ":", "module_info_array", "=", "[", "]", "for", "m", "in", "modules", ":", "mod_name", "=", "m", ".", "__name__", "mod_version", "=", "None", "if", "hasattr", "(", "m", ",", "'get_version'", ")", ":", "mod_version", "=", "m", ".", "get_version", "(", ")", "else", ":", "mod_version", "=", "m", ".", "__version__", "module_info_array", ".", "append", "(", "\"{}/{}\"", ".", "format", "(", "mod_name", ",", "mod_version", ")", ")", "if", "caller_name", ":", "module_info_array", ".", "append", "(", "caller_name", ")", "user_agent_array", ".", "append", "(", "\"({})\"", ".", "format", "(", "'; '", ".", "join", "(", "module_info_array", ")", ")", ")", "else", ":", "if", "caller_name", ":", "user_agent_array", ".", "append", "(", "\"({})\"", ".", "format", "(", "caller_name", ")", ")", "return", "' '", ".", "join", "(", "user_agent_array", ")" ]
31.692308
[ 0.021739130434782608, 0.2857142857142857, 0.06666666666666667, 0.2857142857142857, 0, 0.037037037037037035, 0.13333333333333333, 0.06666666666666667, 0.08, 0.06060606060606061, 0.06666666666666667, 0.04878048780487805, 0.044444444444444446, 0.11764705882352941, 0.046511627906976744, 0.02666666666666667, 0, 0.08695652173913043, 0.04081632653061224, 0, 0.02631578947368421, 0.2222222222222222, 0.08695652173913043, 0.031746031746031744, 0, 0.05405405405405406 ]
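A standalone paraphrase of the User-Agent construction above, assuming nothing from ontobio; requests is used only as an example module with a __version__ attribute.

import requests

def format_user_agent(name, version, modules=(), caller=None):
    # same "name/version (mod/ver; ...; caller)" scheme as above
    parts = ['{}/{}'.format(name, version)]
    info = ['{}/{}'.format(m.__name__, getattr(m, '__version__', '?'))
            for m in modules]
    if caller:
        info.append(caller)
    if info:
        parts.append('({})'.format('; '.join(info)))
    return ' '.join(parts)

print(format_user_agent('ontobio', '1.0', [requests], 'my-script'))
# e.g. ontobio/1.0 (requests/2.31.0; my-script)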
def _label_select_column(self, select, column,
                         populate_result_map,
                         asfrom, column_clause_args,
                         name=None,
                         within_columns_clause=True):
    """produce labeled columns present in a select()."""

    if column.type._has_column_expression and \
            populate_result_map:
        col_expr = column.type.column_expression(column)
        add_to_result_map = lambda keyname, name, objects, type_: \
            self._add_to_result_map(
                keyname, name,
                (column,) + objects, type_)
    else:
        col_expr = column
        if populate_result_map:
            add_to_result_map = self._add_to_result_map
        else:
            add_to_result_map = None

    if not within_columns_clause:
        result_expr = col_expr
    elif isinstance(column, elements.Label):
        if col_expr is not column:
            result_expr = compiler._CompileLabel(
                col_expr,
                column.name,
                alt_names=(column.element,)
            )
        else:
            result_expr = col_expr

    elif select is not None and name:
        result_expr = compiler._CompileLabel(
            col_expr,
            name,
            alt_names=(column._key_label,)
        )

    elif \
            asfrom and \
            isinstance(column, elements.ColumnClause) and \
            not column.is_literal and \
            column.table is not None and \
            not isinstance(column.table, selectable.Select):
        result_expr = compiler._CompileLabel(col_expr,
                                             elements._as_truncated(
                                                 column.name),
                                             alt_names=(column.key,))
    elif (
        not isinstance(column, elements.TextClause) and
        (
            not isinstance(column, elements.UnaryExpression) or
            column.wraps_column_expression
        ) and
        (
            not hasattr(column, 'name') or
            isinstance(column, functions.Function)
        )
    ):
        result_expr = compiler._CompileLabel(col_expr, column.anon_label)
    elif col_expr is not column:
        # TODO: are we sure "column" has a .name and .key here ?
        # assert isinstance(column, elements.ColumnClause)
        result_expr = compiler._CompileLabel(col_expr,
                                             elements._as_truncated(
                                                 column.name),
                                             alt_names=(column.key,))
    else:
        result_expr = col_expr

    column_clause_args.update(
        within_columns_clause=within_columns_clause,
        add_to_result_map=add_to_result_map
    )
    return result_expr._compiler_dispatch(
        self,
        **column_clause_args
    )
[ "def", "_label_select_column", "(", "self", ",", "select", ",", "column", ",", "populate_result_map", ",", "asfrom", ",", "column_clause_args", ",", "name", "=", "None", ",", "within_columns_clause", "=", "True", ")", ":", "if", "column", ".", "type", ".", "_has_column_expression", "and", "populate_result_map", ":", "col_expr", "=", "column", ".", "type", ".", "column_expression", "(", "column", ")", "add_to_result_map", "=", "lambda", "keyname", ",", "name", ",", "objects", ",", "type_", ":", "self", ".", "_add_to_result_map", "(", "keyname", ",", "name", ",", "(", "column", ",", ")", "+", "objects", ",", "type_", ")", "else", ":", "col_expr", "=", "column", "if", "populate_result_map", ":", "add_to_result_map", "=", "self", ".", "_add_to_result_map", "else", ":", "add_to_result_map", "=", "None", "if", "not", "within_columns_clause", ":", "result_expr", "=", "col_expr", "elif", "isinstance", "(", "column", ",", "elements", ".", "Label", ")", ":", "if", "col_expr", "is", "not", "column", ":", "result_expr", "=", "compiler", ".", "_CompileLabel", "(", "col_expr", ",", "column", ".", "name", ",", "alt_names", "=", "(", "column", ".", "element", ",", ")", ")", "else", ":", "result_expr", "=", "col_expr", "elif", "select", "is", "not", "None", "and", "name", ":", "result_expr", "=", "compiler", ".", "_CompileLabel", "(", "col_expr", ",", "name", ",", "alt_names", "=", "(", "column", ".", "_key_label", ",", ")", ")", "elif", "asfrom", "and", "isinstance", "(", "column", ",", "elements", ".", "ColumnClause", ")", "and", "not", "column", ".", "is_literal", "and", "column", ".", "table", "is", "not", "None", "and", "not", "isinstance", "(", "column", ".", "table", ",", "selectable", ".", "Select", ")", ":", "result_expr", "=", "compiler", ".", "_CompileLabel", "(", "col_expr", ",", "elements", ".", "_as_truncated", "(", "column", ".", "name", ")", ",", "alt_names", "=", "(", "column", ".", "key", ",", ")", ")", "elif", "(", "not", "isinstance", "(", "column", ",", "elements", ".", "TextClause", ")", "and", "(", "not", "isinstance", "(", "column", ",", "elements", ".", "UnaryExpression", ")", "or", "column", ".", "wraps_column_expression", ")", "and", "(", "not", "hasattr", "(", "column", ",", "'name'", ")", "or", "isinstance", "(", "column", ",", "functions", ".", "Function", ")", ")", ")", ":", "result_expr", "=", "compiler", ".", "_CompileLabel", "(", "col_expr", ",", "column", ".", "anon_label", ")", "elif", "col_expr", "is", "not", "column", ":", "# TODO: are we sure \"column\" has a .name and .key here ?", "# assert isinstance(column, elements.ColumnClause)", "result_expr", "=", "compiler", ".", "_CompileLabel", "(", "col_expr", ",", "elements", ".", "_as_truncated", "(", "column", ".", "name", ")", ",", "alt_names", "=", "(", "column", ".", "key", ",", ")", ")", "else", ":", "result_expr", "=", "col_expr", "column_clause_args", ".", "update", "(", "within_columns_clause", "=", "within_columns_clause", ",", "add_to_result_map", "=", "add_to_result_map", ")", "return", "result_expr", ".", "_compiler_dispatch", "(", "self", ",", "*", "*", "column_clause_args", ")" ]
42.243902
[ 0.043478260869565216, 0.061224489795918366, 0.05357142857142857, 0.10256410256410256, 0.08771929824561403, 0.03333333333333333, 0, 0.0392156862745098, 0.05555555555555555, 0.03333333333333333, 0.04225352112676056, 0.075, 0.058823529411764705, 0.06382978723404255, 0.15384615384615385, 0.06896551724137931, 0.05714285714285714, 0.03389830508474576, 0.11764705882352941, 0.05, 0, 0.05405405405405406, 0.058823529411764705, 0.041666666666666664, 0.05263157894736842, 0.05660377358490566, 0.06896551724137931, 0.0625, 0.06382978723404255, 0.17647058823529413, 0.11764705882352941, 0.05263157894736842, 0, 0.04878048780487805, 0.061224489795918366, 0.08, 0.09523809523809523, 0.06521739130434782, 0.23076923076923078, 0, 0.14285714285714285, 0.03333333333333333, 0.045454545454545456, 0.05747126436781609, 0.029850746268656716, 0.02857142857142857, 0.027777777777777776, 0.05172413793103448, 0.05555555555555555, 0.06060606060606061, 0.0684931506849315, 0.21428571428571427, 0.028169014084507043, 0.12, 0.05172413793103448, 0.05063291139240506, 0.030303030303030304, 0.10344827586206896, 0.14285714285714285, 0.03225806451612903, 0.02857142857142857, 0.14285714285714285, 0.3, 0.025974025974025976, 0.05555555555555555, 0.029411764705882353, 0.03225806451612903, 0.05172413793103448, 0.05555555555555555, 0.06060606060606061, 0.0684931506849315, 0.15384615384615385, 0.058823529411764705, 0, 0.08823529411764706, 0.05357142857142857, 0.06382978723404255, 0.3333333333333333, 0.06521739130434782, 0.11764705882352941, 0.0625, 0.3333333333333333 ]
def make_directory_entry(d):
    """
    Create a directory entry that conforms to the format of the
    Desktop Entry Specification by freedesktop.org.  See:
            http://freedesktop.org/Standards/desktop-entry-spec
    These should work for both KDE and Gnome2

    An entry is a .directory file that includes the display name,
    icon, etc.  It will be placed in the location specified within
    the passed dict.  The filename can be explicitly specified, but
    if not provided, will default to an escaped version of the name.
    """
    assert d['path'].endswith('.directory')

    # default values
    d.setdefault('comment', '')
    d.setdefault('icon', '')

    fo = open(d['path'], "w")
    fo.write("""\
[Desktop Entry]
Type=Directory
Encoding=UTF-8
Name=%(name)s
Comment=%(comment)s
Icon=%(icon)s
""" % d)
    fo.close()
[ "def", "make_directory_entry", "(", "d", ")", ":", "assert", "d", "[", "'path'", "]", ".", "endswith", "(", "'.directory'", ")", "# default values", "d", ".", "setdefault", "(", "'comment'", ",", "''", ")", "d", ".", "setdefault", "(", "'icon'", ",", "''", ")", "fo", "=", "open", "(", "d", "[", "'path'", "]", ",", "\"w\"", ")", "fo", ".", "write", "(", "\"\"\"\\\n[Desktop Entry]\nType=Directory\nEncoding=UTF-8\nName=%(name)s\nComment=%(comment)s\nIcon=%(icon)s\n\"\"\"", "%", "d", ")", "fo", ".", "close", "(", ")" ]
29.035714
[ 0.03571428571428571, 0.2857142857142857, 0.025974025974025976, 0.046511627906976744, 0.047619047619047616, 0.044444444444444446, 0, 0.02631578947368421, 0.02631578947368421, 0.02564102564102564, 0.05714285714285714, 0.2857142857142857, 0.046511627906976744, 0, 0.1, 0.06451612903225806, 0.07142857142857142, 0, 0.06896551724137931, 0.17647058823529413, 0.06666666666666667, 0.14285714285714285, 0.14285714285714285, 0.23076923076923078, 0.15789473684210525, 0.23076923076923078, 0.125, 0.14285714285714285 ]
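Feeding the helper a minimal dict (assuming make_directory_entry above is importable); comment and icon fall back to empty strings via setdefault.

import os
import tempfile

d = {'name': 'Tools',
     'path': os.path.join(tempfile.mkdtemp(), 'tools.directory')}
make_directory_entry(d)
print(open(d['path']).read())
# -> a [Desktop Entry] block with Type=Directory and Name=Tools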
def parse_solvebio_args(self, args=None, namespace=None):
    """
        Try to parse the args first, and then add the subparsers. We want
        to do this so that we can check to see if there are any unknown
        args. We can assume that if, by this point, there are no unknown
        args, we can append shell to the unknown args as a default.
        However, to do this, we have to suppress stdout/stderr during the
        initial parsing, in case the user calls the help method (in which
        case we want to add the additional arguments and *then* call the
        help method). This is a hack to get around the fact that argparse
        doesn't allow default subcommands.
    """
    try:
        sys.stdout = sys.stderr = open(os.devnull, 'w')
        _, unknown_args = self.parse_known_args(args, namespace)
        if not unknown_args:
            args.insert(0, 'shell')
    except SystemExit:
        pass
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
    self._add_subcommands()
    return super(SolveArgumentParser, self).parse_args(args, namespace)
[ "def", "parse_solvebio_args", "(", "self", ",", "args", "=", "None", ",", "namespace", "=", "None", ")", ":", "try", ":", "sys", ".", "stdout", "=", "sys", ".", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "_", ",", "unknown_args", "=", "self", ".", "parse_known_args", "(", "args", ",", "namespace", ")", "if", "not", "unknown_args", ":", "args", ".", "insert", "(", "0", ",", "'shell'", ")", "except", "SystemExit", ":", "pass", "finally", ":", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "sys", ".", "__stdout__", ",", "sys", ".", "__stderr__", "self", ".", "_add_subcommands", "(", ")", "return", "super", "(", "SolveArgumentParser", ",", "self", ")", ".", "parse_args", "(", "args", ",", "namespace", ")" ]
49.24
[ 0.017543859649122806, 0.18181818181818182, 0.025974025974025976, 0.02666666666666667, 0.02631578947368421, 0.028169014084507043, 0.025974025974025976, 0.06493506493506493, 0.039473684210526314, 0.02631578947368421, 0.043478260869565216, 0.18181818181818182, 0.16666666666666666, 0.03389830508474576, 0.029411764705882353, 0.0625, 0.05128205128205128, 0.07692307692307693, 0.125, 0.125, 0.06666666666666667, 0.06666666666666667, 0.029850746268656716, 0.06451612903225806, 0.02666666666666667 ]
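The default-subcommand trick above, reduced to a runnable argparse sketch with hypothetical 'shell' and 'upload' subcommands (no stdout/stderr suppression is needed here since help is never triggered).

import argparse

def parse_with_default(argv):
    # probe parse: if nothing unknown remains, fall back to 'shell'
    probe = argparse.ArgumentParser(add_help=False)
    _, unknown = probe.parse_known_args(argv)
    if not unknown:
        argv = ['shell'] + list(argv)
    parser = argparse.ArgumentParser()
    sub = parser.add_subparsers(dest='command')
    sub.add_parser('shell')
    sub.add_parser('upload')
    return parser.parse_args(argv)

assert parse_with_default([]).command == 'shell'
assert parse_with_default(['upload']).command == 'upload'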
def connection_cache(func: callable):
    """Connection cache for SSH sessions. This is to prevent opening a new,
    expensive connection on every command run."""
    cache = dict()
    lock = RLock()

    @wraps(func)
    def func_wrapper(host: str, username: str, *args, **kwargs):
        key = "{h}-{u}".format(h=host, u=username)
        if key in cache:
            # connection exists, check if it is still valid before
            # returning it.
            conn = cache[key]
            if conn and conn.is_active() and conn.is_authenticated():
                return conn
            else:
                # try to close a bad connection and remove it from
                # the cache.
                if conn:
                    try_close(conn)
                del cache[key]

        # key is not in the cache, so try to recreate it
        # it may have been removed just above.
        if key not in cache:
            conn = func(host, username, *args, **kwargs)
            if conn is not None:
                cache[key] = conn
            return conn

        # not sure how to reach this point, but just in case.
        return None

    def get_cache() -> dict:
        return cache

    def purge(key: str=None):
        with lock:
            if key is None:
                conns = [(k, v) for k, v in cache.items()]
            elif key in cache:
                conns = ((key, cache[key]), )
            else:
                conns = list()

            for k, v in conns:
                try_close(v)
                del cache[k]

    func_wrapper.get_cache = get_cache
    func_wrapper.purge = purge
    return func_wrapper
[ "def", "connection_cache", "(", "func", ":", "callable", ")", ":", "cache", "=", "dict", "(", ")", "lock", "=", "RLock", "(", ")", "@", "wraps", "(", "func", ")", "def", "func_wrapper", "(", "host", ":", "str", ",", "username", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "\"{h}-{u}\"", ".", "format", "(", "h", "=", "host", ",", "u", "=", "username", ")", "if", "key", "in", "cache", ":", "# connection exists, check if it is still valid before", "# returning it.", "conn", "=", "cache", "[", "key", "]", "if", "conn", "and", "conn", ".", "is_active", "(", ")", "and", "conn", ".", "is_authenticated", "(", ")", ":", "return", "conn", "else", ":", "# try to close a bad connection and remove it from", "# the cache.", "if", "conn", ":", "try_close", "(", "conn", ")", "del", "cache", "[", "key", "]", "# key is not in the cache, so try to recreate it", "# it may have been removed just above.", "if", "key", "not", "in", "cache", ":", "conn", "=", "func", "(", "host", ",", "username", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "conn", "is", "not", "None", ":", "cache", "[", "key", "]", "=", "conn", "return", "conn", "# not sure how to reach this point, but just in case.", "return", "None", "def", "get_cache", "(", ")", "->", "dict", ":", "return", "cache", "def", "purge", "(", "key", ":", "str", "=", "None", ")", ":", "with", "lock", ":", "if", "key", "is", "None", ":", "conns", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "cache", ".", "items", "(", ")", "]", "elif", "key", "in", "cache", ":", "conns", "=", "(", "(", "key", ",", "cache", "[", "key", "]", ")", ",", ")", "else", ":", "conns", "=", "list", "(", ")", "for", "k", ",", "v", "in", "conns", ":", "try_close", "(", "v", ")", "del", "cache", "[", "k", "]", "func_wrapper", ".", "get_cache", "=", "get_cache", "func_wrapper", ".", "purge", "=", "purge", "return", "func_wrapper" ]
30.826923
[ 0.02702702702702703, 0.02857142857142857, 0.07272727272727272, 0.1111111111111111, 0.1111111111111111, 0, 0.125, 0.03125, 0.04, 0.08333333333333333, 0.030303030303030304, 0.07407407407407407, 0.06896551724137931, 0.028985507246376812, 0.07407407407407407, 0.11764705882352941, 0.030303030303030304, 0.07142857142857142, 0.08333333333333333, 0.05714285714285714, 0.06666666666666667, 0, 0.03571428571428571, 0.043478260869565216, 0.07142857142857142, 0.03571428571428571, 0.0625, 0.06060606060606061, 0.08695652173913043, 0, 0.03278688524590164, 0.10526315789473684, 0, 0.07142857142857142, 0.1, 0, 0.13793103448275862, 0.1111111111111111, 0.07407407407407407, 0.034482758620689655, 0.06666666666666667, 0.044444444444444446, 0.11764705882352941, 0.06666666666666667, 0, 0.06666666666666667, 0.07142857142857142, 0.07142857142857142, 0, 0.05263157894736842, 0.06666666666666667, 0.08695652173913043 ]
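Hypothetical use of the decorator, treating this sketch and the decorator as one module (so its try_close reference resolves); FakeConn stands in for a real SSH session.

class FakeConn(object):
    def is_active(self):
        return True

    def is_authenticated(self):
        return True

def try_close(conn):
    # stand-in for the module's close helper used by purge()
    pass

@connection_cache
def open_session(host, username):
    return FakeConn()

a = open_session('db1', 'admin')
b = open_session('db1', 'admin')
assert a is b                    # second call hits the cache
open_session.purge('db1-admin')  # key format is "{host}-{user}"
assert not open_session.get_cache()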
def snr_ratio(in1, in2):
    """
    The following function simply calculates the signal to noise ratio between two signals.

    INPUTS:
    in1 (no default):   Array containing values for signal 1.
    in2 (no default):   Array containing values for signal 2.

    OUTPUTS:
    out1                The ratio of the signal to noise ratios of two signals.
    """

    out1 = 20*(np.log10(np.linalg.norm(in1)/np.linalg.norm(in1-in2)))

    return out1
[ "def", "snr_ratio", "(", "in1", ",", "in2", ")", ":", "out1", "=", "20", "*", "(", "np", ".", "log10", "(", "np", ".", "linalg", ".", "norm", "(", "in1", ")", "/", "np", ".", "linalg", ".", "norm", "(", "in1", "-", "in2", ")", ")", ")", "return", "out1" ]
30.733333
[ 0.041666666666666664, 0.2857142857142857, 0.03296703296703297, 0, 0.18181818181818182, 0.043478260869565216, 0.043478260869565216, 0, 0.16666666666666666, 0.034482758620689655, 0.2857142857142857, 0, 0.028985507246376812, 0, 0.13333333333333333 ]
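A worked example of the 20*log10(||in1|| / ||in1 - in2||) figure (assuming snr_ratio above and numpy as np are in scope): a residual ten times smaller than the signal scores 20 dB.

import numpy as np

in1 = np.ones(100)          # reference signal, norm 10
in2 = in1 + 0.1             # estimate offset by 0.1, residual norm 1
print(snr_ratio(in1, in2))  # -> 20.0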
def resource(self, resource_type):
    """Get instance of Resource Class with dynamic type.

    Args:
        resource_type: The resource type name (e.g. Adversary, User Agent, etc.).

    Returns:
        (object): Instance of Resource Object child class.
    """
    try:
        resource = getattr(self.resources, self.safe_rt(resource_type))(self)
    except AttributeError:
        self._resources(True)
        resource = getattr(self.resources, self.safe_rt(resource_type))(self)
    return resource
[ "def", "resource", "(", "self", ",", "resource_type", ")", ":", "try", ":", "resource", "=", "getattr", "(", "self", ".", "resources", ",", "self", ".", "safe_rt", "(", "resource_type", ")", ")", "(", "self", ")", "except", "AttributeError", ":", "self", ".", "_resources", "(", "True", ")", "resource", "=", "getattr", "(", "self", ".", "resources", ",", "self", ".", "safe_rt", "(", "resource_type", ")", ")", "(", "self", ")", "return", "resource" ]
35.933333
[ 0.029411764705882353, 0.03333333333333333, 0, 0.15384615384615385, 0.04819277108433735, 0, 0.125, 0.03225806451612903, 0.18181818181818182, 0.16666666666666666, 0.037037037037037035, 0.06666666666666667, 0.06060606060606061, 0.037037037037037035, 0.08695652173913043 ]
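The getattr-based dispatch above in isolation; Resources and safe_rt here are simplified stand-ins for the real container and sanitiser.

class Resources(object):
    class Adversary(object):
        def __init__(self, tc):
            self.tc = tc

def safe_rt(resource_type):
    # hypothetical sanitiser: 'User Agent' -> 'User_Agent'
    return resource_type.replace(' ', '_')

resources = Resources()
cls = getattr(resources, safe_rt('Adversary'))
obj = cls(None)  # instance of Resources.Adversary
assert isinstance(obj, Resources.Adversary)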
def fix_e225(self, result):
    """Fix missing whitespace around operator."""
    target = self.source[result['line'] - 1]
    offset = result['column'] - 1
    fixed = target[:offset] + ' ' + target[offset:]

    # Only proceed if non-whitespace characters match.
    # And make sure we don't break the indentation.
    if (
        fixed.replace(' ', '') == target.replace(' ', '') and
        _get_indentation(fixed) == _get_indentation(target)
    ):
        self.source[result['line'] - 1] = fixed
        error_code = result.get('id', 0)
        try:
            ts = generate_tokens(fixed)
        except (SyntaxError, tokenize.TokenError):
            return
        if not check_syntax(fixed.lstrip()):
            return
        errors = list(
            pycodestyle.missing_whitespace_around_operator(fixed, ts))
        for e in reversed(errors):
            if error_code != e[1].split()[0]:
                continue
            offset = e[0][1]
            fixed = fixed[:offset] + ' ' + fixed[offset:]
        self.source[result['line'] - 1] = fixed
    else:
        return []
[ "def", "fix_e225", "(", "self", ",", "result", ")", ":", "target", "=", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]", "offset", "=", "result", "[", "'column'", "]", "-", "1", "fixed", "=", "target", "[", ":", "offset", "]", "+", "' '", "+", "target", "[", "offset", ":", "]", "# Only proceed if non-whitespace characters match.", "# And make sure we don't break the indentation.", "if", "(", "fixed", ".", "replace", "(", "' '", ",", "''", ")", "==", "target", ".", "replace", "(", "' '", ",", "''", ")", "and", "_get_indentation", "(", "fixed", ")", "==", "_get_indentation", "(", "target", ")", ")", ":", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]", "=", "fixed", "error_code", "=", "result", ".", "get", "(", "'id'", ",", "0", ")", "try", ":", "ts", "=", "generate_tokens", "(", "fixed", ")", "except", "(", "SyntaxError", ",", "tokenize", ".", "TokenError", ")", ":", "return", "if", "not", "check_syntax", "(", "fixed", ".", "lstrip", "(", ")", ")", ":", "return", "errors", "=", "list", "(", "pycodestyle", ".", "missing_whitespace_around_operator", "(", "fixed", ",", "ts", ")", ")", "for", "e", "in", "reversed", "(", "errors", ")", ":", "if", "error_code", "!=", "e", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ":", "continue", "offset", "=", "e", "[", "0", "]", "[", "1", "]", "fixed", "=", "fixed", "[", ":", "offset", "]", "+", "' '", "+", "fixed", "[", "offset", ":", "]", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]", "=", "fixed", "else", ":", "return", "[", "]" ]
39.2
[ 0.037037037037037035, 0.03773584905660377, 0.041666666666666664, 0.05405405405405406, 0.03636363636363636, 0, 0.034482758620689655, 0.03636363636363636, 0.25, 0.03076923076923077, 0.031746031746031744, 0.3, 0.0392156862745098, 0.045454545454545456, 0.125, 0.046511627906976744, 0.037037037037037035, 0.09090909090909091, 0.041666666666666664, 0.09090909090909091, 0.11538461538461539, 0.04054054054054054, 0.05263157894736842, 0.04081632653061224, 0.07142857142857142, 0.0625, 0.03278688524590164, 0.0392156862745098, 0.15384615384615385, 0.09523809523809523 ]
def member_del(self, member_id, reconfig=True):
    """remove member from replica set
    Args:
        member_id - member index
        reconfig - whether the replica set needs to be reconfigured

    return True if the operation succeeds, otherwise False
    """
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    if reconfig and member_id in [member['_id'] for member in self.members()]:
        config = self.config
        config['members'].pop(member_id)
        self.repl_update(config)
    self._servers.remove(server_id)
    return True
[ "def", "member_del", "(", "self", ",", "member_id", ",", "reconfig", "=", "True", ")", ":", "server_id", "=", "self", ".", "_servers", ".", "host_to_server_id", "(", "self", ".", "member_id_to_host", "(", "member_id", ")", ")", "if", "reconfig", "and", "member_id", "in", "[", "member", "[", "'_id'", "]", "for", "member", "in", "self", ".", "members", "(", ")", "]", ":", "config", "=", "self", ".", "config", "config", "[", "'members'", "]", ".", "pop", "(", "member_id", ")", "self", ".", "repl_update", "(", "config", ")", "self", ".", "_servers", ".", "remove", "(", "server_id", ")", "return", "True" ]
37.5625
[ 0.02127659574468085, 0.04878048780487805, 0.15384615384615385, 0.05555555555555555, 0.0425531914893617, 0, 0.03571428571428571, 0.18181818181818182, 0.057692307692307696, 0.06521739130434782, 0.036585365853658534, 0.0625, 0.045454545454545456, 0.05555555555555555, 0.05128205128205128, 0.10526315789473684 ]
def setMilitaryTime(self, state=True):
    """
    Sets whether or not this widget will be displayed in military time.
    When in military time, the hour options will go from 01-24, when in
    normal mode, the hours will go 1-12.

    :param state | <bool>
    """
    time = self.time()
    self._militaryTime = state
    self._hourCombo.clear()

    if state:
        self._timeOfDayCombo.hide()
        self._hourCombo.addItems(['{0:02d}'.format(i+1) for i in xrange(24)])
    else:
        self._timeOfDayCombo.show()
        self._hourCombo.addItems(['{0}'.format(i+1) for i in xrange(12)])

    self.setTime(time)
[ "def", "setMilitaryTime", "(", "self", ",", "state", "=", "True", ")", ":", "time", "=", "self", ".", "time", "(", ")", "self", ".", "_militaryTime", "=", "state", "self", ".", "_hourCombo", ".", "clear", "(", ")", "if", "state", ":", "self", ".", "_timeOfDayCombo", ".", "hide", "(", ")", "self", ".", "_hourCombo", ".", "addItems", "(", "[", "'{0:02d}'", ".", "format", "(", "i", "+", "1", ")", "for", "i", "in", "xrange", "(", "24", ")", "]", ")", "else", ":", "self", ".", "_timeOfDayCombo", ".", "show", "(", ")", "self", ".", "_hourCombo", ".", "addItems", "(", "[", "'{0}'", ".", "format", "(", "i", "+", "1", ")", "for", "i", "in", "xrange", "(", "12", ")", "]", ")", "self", ".", "setTime", "(", "time", ")" ]
34.3
[ 0.02564102564102564, 0.08333333333333333, 0.017094017094017096, 0.0136986301369863, 1, 0.10810810810810811, 0.08333333333333333, 0.037037037037037035, 1, 0.02857142857142857, 0.03125, 1, 0.16666666666666666, 0.025, 0.024390243902439025, 0.21428571428571427, 0.025, 0.01282051282051282, 1, 0.07692307692307693 ]
def colindex_by_colname(self, colname):
    """Return column index whose name is :param:`column`

    :raises: `ValueError` when no column with :param:`colname` found
    """
    for i, coldef in enumerate(self):  # iterate each column's definition
        if coldef.name == colname:
            return i
    raise ValueError('No column named "%s" found' % (colname))
[ "def", "colindex_by_colname", "(", "self", ",", "colname", ")", ":", "for", "i", ",", "coldef", "in", "enumerate", "(", "self", ")", ":", "# iterate each column's definition", "if", "coldef", ".", "name", "==", "colname", ":", "return", "i", "raise", "ValueError", "(", "'No column named \"%s\" found'", "%", "(", "colname", ")", ")" ]
43.222222
[ 0.02564102564102564, 0.03333333333333333, 0, 0.09722222222222222, 0.18181818181818182, 0.02531645569620253, 0.05263157894736842, 0.08333333333333333, 0.030303030303030304 ]
def LightcurveHDU(model):
    '''
    Construct the data HDU file containing the arrays and the observing info.

    '''

    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=1)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))
    cards.append(('MODEL', model.name, 'Name of EVEREST model used'))
    cards.append(('APNAME', model.aperture_name, 'Name of aperture used'))
    cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences'))
    for c in range(len(model.breakpoints)):
        cards.append(
            ('BRKPT%02d' % (c + 1), model.breakpoints[c],
             'Light curve breakpoint'))
    cards.append(('CBVNUM', model.cbv_num,
                  'Number of CBV signals to recover'))
    cards.append(('CBVNITER', model.cbv_niter,
                  'Number of CBV SysRem iterations'))
    cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs'))
    cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs'))
    cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions'))
    cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP'))
    cards.append(('CDPPR', model.cdppr, 'Raw CDPP'))
    cards.append(('CDPPV', model.cdppv, 'Average validation CDPP'))
    cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP'))
    for i in range(99):
        try:
            cards.append(('CDPP%02d' % (i + 1),
                          model.cdpp_arr[i] if not np.isnan(
                              model.cdpp_arr[i]) else 0,
                          'Chunk de-trended CDPP'))
            cards.append(('CDPPR%02d' % (
                i + 1), model.cdppr_arr[i] if not np.isnan(
                    model.cdppr_arr[i]) else 0, 'Chunk raw CDPP'))
            cards.append(('CDPPV%02d' % (i + 1),
                          model.cdppv_arr[i] if not np.isnan(
                              model.cdppv_arr[i]) else 0,
                          'Chunk validation CDPP'))
        except:
            break
    cards.append(
        ('CVMIN', model.cv_min, 'Cross-validation objective function'))
    cards.append(
        ('GITER', model.giter, 'Number of GP optimiziation iterations'))
    cards.append(
        ('GMAXF', model.giter, 'Max number of GP function evaluations'))
    cards.append(('GPFACTOR', model.gp_factor,
                  'GP amplitude initialization factor'))
    cards.append(('KERNEL', model.kernel, 'GP kernel name'))
    if model.kernel == 'Basic':
        cards.append(
            ('GPWHITE', model.kernel_params[0],
             'GP white noise amplitude (e-/s)'))
        cards.append(
            ('GPRED', model.kernel_params[1],
             'GP red noise amplitude (e-/s)'))
        cards.append(
            ('GPTAU', model.kernel_params[2],
             'GP red noise timescale (days)'))
    elif model.kernel == 'QuasiPeriodic':
        cards.append(
            ('GPWHITE', model.kernel_params[0],
             'GP white noise amplitude (e-/s)'))
        cards.append(
            ('GPRED', model.kernel_params[1],
             'GP red noise amplitude (e-/s)'))
        cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor'))
        cards.append(('GPPER', model.kernel_params[3], 'GP period (days)'))
    for c in range(len(model.breakpoints)):
        for o in range(model.pld_order):
            cards.append(('LAMB%02d%02d' % (c + 1, o + 1),
                          model.lam[c][o], 'Cross-validation parameter'))
            if model.name == 'iPLD':
                cards.append(('RECL%02d%02d' % (c + 1, o + 1),
                              model.reclam[c][o],
                              'Cross-validation parameter'))
    cards.append(('LEPS', model.leps, 'Cross-validation tolerance'))
    cards.append(('MAXPIX', model.max_pixels, 'Maximum size of TPF aperture'))
    for i, source in enumerate(model.nearby[:99]):
        cards.append(('NRBY%02dID' % (i + 1),
                      source['ID'], 'Nearby source ID'))
        cards.append(
            ('NRBY%02dX' % (i + 1), source['x'], 'Nearby source X position'))
        cards.append(
            ('NRBY%02dY' % (i + 1), source['y'], 'Nearby source Y position'))
        cards.append(
            ('NRBY%02dM' % (i + 1), source['mag'], 'Nearby source magnitude'))
        cards.append(('NRBY%02dX0' % (i + 1),
                      source['x0'], 'Nearby source reference X'))
        cards.append(('NRBY%02dY0' % (i + 1),
                      source['y0'], 'Nearby source reference Y'))
    for i, n in enumerate(model.neighbors):
        cards.append(
            ('NEIGH%02d' % i, model.neighbors[i],
             'Neighboring star used to de-trend'))
    cards.append(('OITER', model.oiter, 'Number of outlier search iterations'))
    cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?'))
    cards.append(
        ('OSIGMA', model.osigma, 'Outlier tolerance (standard deviations)'))
    for i, planet in enumerate(model.planets):
        cards.append(
            ('P%02dT0' % (i + 1), planet[0], 'Planet transit time (days)'))
        cards.append(
            ('P%02dPER' % (i + 1), planet[1], 'Planet transit period (days)'))
        cards.append(
            ('P%02dDUR' % (i + 1), planet[2],
             'Planet transit duration (days)'))
    cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order'))
    cards.append(('SATUR', model.saturated, 'Is target saturated?'))
    cards.append(('SATTOL', model.saturation_tolerance,
                  'Fractional saturation tolerance'))

    # Add the EVEREST quality flags to the QUALITY array
    quality = np.array(model.quality)
    quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1)
    quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1)
    quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1)
    quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1)
    quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1)

    # When de-trending, we interpolated to fill in NaN fluxes. Here
    # we insert the NaNs back in, since there's no actual physical
    # information at those cadences.
    flux = np.array(model.flux)
    flux[model.nanmask] = np.nan

    # Create the arrays list
    arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn),
              pyfits.Column(name='FLUX', format='D', array=flux,
                            unit='e-/s'),
              pyfits.Column(name='FRAW', format='D', array=model.fraw,
                            unit='e-/s'),
              pyfits.Column(name='FRAW_ERR', format='D',
                            array=model.fraw_err, unit='e-/s'),
              pyfits.Column(name='QUALITY', format='J', array=quality),
              pyfits.Column(name='TIME', format='D', array=model.time,
                            unit='BJD - 2454833')]

    # Add the CBVs
    if model.fcor is not None:
        arrays += [pyfits.Column(name='FCOR', format='D',
                                 array=model.fcor, unit='e-/s')]
        for n in range(model.XCBV.shape[1]):
            arrays += [pyfits.Column(name='CBV%02d' % (n + 1),
                                     format='D', array=model.XCBV[:, n])]

    # Did we subtract a background term?
    if hasattr(model.bkg, '__len__'):
        arrays.append(pyfits.Column(name='BKG', format='D',
                                    array=model.bkg, unit='e-/s'))

    # Create the HDU
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS')
    return hdu
[ "def", "LightcurveHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "1", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "cards", ".", "append", "(", "(", "'MODEL'", ",", "model", ".", "name", ",", "'Name of EVEREST model used'", ")", ")", "cards", ".", "append", "(", "(", "'APNAME'", ",", "model", ".", "aperture_name", ",", "'Name of aperture used'", ")", ")", "cards", ".", "append", "(", "(", "'BPAD'", ",", "model", ".", "bpad", ",", "'Chunk overlap in cadences'", ")", ")", "for", "c", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", ":", "cards", ".", "append", "(", "(", "'BRKPT%02d'", "%", "(", "c", "+", "1", ")", ",", "model", ".", "breakpoints", "[", "c", "]", ",", "'Light curve breakpoint'", ")", ")", "cards", ".", "append", "(", "(", "'CBVNUM'", ",", "model", ".", "cbv_num", ",", "'Number of CBV signals to recover'", ")", ")", "cards", ".", "append", "(", "(", "'CBVNITER'", ",", "model", ".", "cbv_niter", ",", "'Number of CBV SysRem iterations'", ")", ")", "cards", ".", "append", "(", "(", "'CBVWIN'", ",", "model", ".", "cbv_win", ",", "'Window size for smoothing CBVs'", ")", ")", "cards", ".", "append", "(", "(", "'CBVORD'", ",", "model", ".", "cbv_order", ",", "'Order when smoothing CBVs'", ")", ")", "cards", ".", "append", "(", "(", "'CDIVS'", ",", "model", ".", "cdivs", ",", "'Cross-validation subdivisions'", ")", ")", "cards", ".", "append", "(", "(", "'CDPP'", ",", "model", ".", "cdpp", ",", "'Average de-trended CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPR'", ",", "model", ".", "cdppr", ",", "'Raw CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPV'", ",", "model", ".", "cdppv", ",", "'Average validation CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPG'", ",", "model", ".", "cdppg", ",", "'Average GP-de-trended CDPP'", ")", ")", "for", "i", "in", "range", "(", "99", ")", ":", "try", ":", "cards", ".", "append", "(", "(", "'CDPP%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdpp_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdpp_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk de-trended CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPR%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdppr_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdppr_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk raw CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPV%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdppv_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdppv_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk validation CDPP'", ")", ")", "except", ":", "break", 
"cards", ".", "append", "(", "(", "'CVMIN'", ",", "model", ".", "cv_min", ",", "'Cross-validation objective function'", ")", ")", "cards", ".", "append", "(", "(", "'GITER'", ",", "model", ".", "giter", ",", "'Number of GP optimiziation iterations'", ")", ")", "cards", ".", "append", "(", "(", "'GMAXF'", ",", "model", ".", "giter", ",", "'Max number of GP function evaluations'", ")", ")", "cards", ".", "append", "(", "(", "'GPFACTOR'", ",", "model", ".", "gp_factor", ",", "'GP amplitude initialization factor'", ")", ")", "cards", ".", "append", "(", "(", "'KERNEL'", ",", "model", ".", "kernel", ",", "'GP kernel name'", ")", ")", "if", "model", ".", "kernel", "==", "'Basic'", ":", "cards", ".", "append", "(", "(", "'GPWHITE'", ",", "model", ".", "kernel_params", "[", "0", "]", ",", "'GP white noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPRED'", ",", "model", ".", "kernel_params", "[", "1", "]", ",", "'GP red noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPTAU'", ",", "model", ".", "kernel_params", "[", "2", "]", ",", "'GP red noise timescale (days)'", ")", ")", "elif", "model", ".", "kernel", "==", "'QuasiPeriodic'", ":", "cards", ".", "append", "(", "(", "'GPWHITE'", ",", "model", ".", "kernel_params", "[", "0", "]", ",", "'GP white noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPRED'", ",", "model", ".", "kernel_params", "[", "1", "]", ",", "'GP red noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPGAMMA'", ",", "model", ".", "kernel_params", "[", "2", "]", ",", "'GP scale factor'", ")", ")", "cards", ".", "append", "(", "(", "'GPPER'", ",", "model", ".", "kernel_params", "[", "3", "]", ",", "'GP period (days)'", ")", ")", "for", "c", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", ":", "for", "o", "in", "range", "(", "model", ".", "pld_order", ")", ":", "cards", ".", "append", "(", "(", "'LAMB%02d%02d'", "%", "(", "c", "+", "1", ",", "o", "+", "1", ")", ",", "model", ".", "lam", "[", "c", "]", "[", "o", "]", ",", "'Cross-validation parameter'", ")", ")", "if", "model", ".", "name", "==", "'iPLD'", ":", "cards", ".", "append", "(", "(", "'RECL%02d%02d'", "%", "(", "c", "+", "1", ",", "o", "+", "1", ")", ",", "model", ".", "reclam", "[", "c", "]", "[", "o", "]", ",", "'Cross-validation parameter'", ")", ")", "cards", ".", "append", "(", "(", "'LEPS'", ",", "model", ".", "leps", ",", "'Cross-validation tolerance'", ")", ")", "cards", ".", "append", "(", "(", "'MAXPIX'", ",", "model", ".", "max_pixels", ",", "'Maximum size of TPF aperture'", ")", ")", "for", "i", ",", "source", "in", "enumerate", "(", "model", ".", "nearby", "[", ":", "99", "]", ")", ":", "cards", ".", "append", "(", "(", "'NRBY%02dID'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'ID'", "]", ",", "'Nearby source ID'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dX'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'x'", "]", ",", "'Nearby source X position'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dY'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'y'", "]", ",", "'Nearby source Y position'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dM'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'mag'", "]", ",", "'Nearby source magnitude'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dX0'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'x0'", "]", ",", "'Nearby source reference X'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dY0'", 
"%", "(", "i", "+", "1", ")", ",", "source", "[", "'y0'", "]", ",", "'Nearby source reference Y'", ")", ")", "for", "i", ",", "n", "in", "enumerate", "(", "model", ".", "neighbors", ")", ":", "cards", ".", "append", "(", "(", "'NEIGH%02d'", "%", "i", ",", "model", ".", "neighbors", "[", "i", "]", ",", "'Neighboring star used to de-trend'", ")", ")", "cards", ".", "append", "(", "(", "'OITER'", ",", "model", ".", "oiter", ",", "'Number of outlier search iterations'", ")", ")", "cards", ".", "append", "(", "(", "'OPTGP'", ",", "model", ".", "optimize_gp", ",", "'GP optimization performed?'", ")", ")", "cards", ".", "append", "(", "(", "'OSIGMA'", ",", "model", ".", "osigma", ",", "'Outlier tolerance (standard deviations)'", ")", ")", "for", "i", ",", "planet", "in", "enumerate", "(", "model", ".", "planets", ")", ":", "cards", ".", "append", "(", "(", "'P%02dT0'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "0", "]", ",", "'Planet transit time (days)'", ")", ")", "cards", ".", "append", "(", "(", "'P%02dPER'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "1", "]", ",", "'Planet transit period (days)'", ")", ")", "cards", ".", "append", "(", "(", "'P%02dDUR'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "2", "]", ",", "'Planet transit duration (days)'", ")", ")", "cards", ".", "append", "(", "(", "'PLDORDER'", ",", "model", ".", "pld_order", ",", "'PLD de-trending order'", ")", ")", "cards", ".", "append", "(", "(", "'SATUR'", ",", "model", ".", "saturated", ",", "'Is target saturated?'", ")", ")", "cards", ".", "append", "(", "(", "'SATTOL'", ",", "model", ".", "saturation_tolerance", ",", "'Fractional saturation tolerance'", ")", ")", "# Add the EVEREST quality flags to the QUALITY array", "quality", "=", "np", ".", "array", "(", "model", ".", "quality", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "badmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_BAD", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "nanmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_NAN", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "outmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_OUT", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "recmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_REC", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "transitmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_TRN", "-", "1", ")", "# When de-trending, we interpolated to fill in NaN fluxes. 
Here", "# we insert the NaNs back in, since there's no actual physical", "# information at those cadences.", "flux", "=", "np", ".", "array", "(", "model", ".", "flux", ")", "flux", "[", "model", ".", "nanmask", "]", "=", "np", ".", "nan", "# Create the arrays list", "arrays", "=", "[", "pyfits", ".", "Column", "(", "name", "=", "'CADN'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "cadn", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FLUX'", ",", "format", "=", "'D'", ",", "array", "=", "flux", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FRAW'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fraw", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FRAW_ERR'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fraw_err", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'QUALITY'", ",", "format", "=", "'J'", ",", "array", "=", "quality", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'TIME'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "time", ",", "unit", "=", "'BJD - 2454833'", ")", "]", "# Add the CBVs", "if", "model", ".", "fcor", "is", "not", "None", ":", "arrays", "+=", "[", "pyfits", ".", "Column", "(", "name", "=", "'FCOR'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fcor", ",", "unit", "=", "'e-/s'", ")", "]", "for", "n", "in", "range", "(", "model", ".", "XCBV", ".", "shape", "[", "1", "]", ")", ":", "arrays", "+=", "[", "pyfits", ".", "Column", "(", "name", "=", "'CBV%02d'", "%", "(", "n", "+", "1", ")", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "XCBV", "[", ":", ",", "n", "]", ")", "]", "# Did we subtract a background term?", "if", "hasattr", "(", "model", ".", "bkg", ",", "'__len__'", ")", ":", "arrays", ".", "append", "(", "pyfits", ".", "Column", "(", "name", "=", "'BKG'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "bkg", ",", "unit", "=", "'e-/s'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "cols", "=", "pyfits", ".", "ColDefs", "(", "arrays", ")", "hdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "cols", ",", "header", "=", "header", ",", "name", "=", "'ARRAYS'", ")", "return", "hdu" ]
47.743902
[ 0.04, 0.2857142857142857, 0.025974025974025976, 0, 0.2857142857142857, 0, 0.08695652173913043, 0.037037037037037035, 0, 0.09090909090909091, 0.03508771929824561, 0.03508771929824561, 0.03508771929824561, 0.03333333333333333, 0.02564102564102564, 0.02631578947368421, 0.06382978723404255, 0.06557377049180328, 0.028985507246376812, 0.02702702702702703, 0.029850746268656716, 0.046511627906976744, 0.14285714285714285, 0.05263157894736842, 0.10256410256410256, 0.02531645569620253, 0.06521739130434782, 0.07547169811320754, 0.025974025974025976, 0.02702702702702703, 0.0273972602739726, 0.03076923076923077, 0.038461538461538464, 0.029850746268656716, 0.02857142857142857, 0.08695652173913043, 0.16666666666666666, 0.06382978723404255, 0.06666666666666667, 0.04411764705882353, 0.07317073170731707, 0.03389830508474576, 0.04838709677419355, 0.0625, 0.06557377049180328, 0.043478260869565216, 0.2, 0.11764705882352941, 0.17647058823529413, 0.04225352112676056, 0.17647058823529413, 0.041666666666666664, 0.17647058823529413, 0.041666666666666664, 0.06521739130434782, 0.07142857142857142, 0.03333333333333333, 0.06451612903225806, 0.14285714285714285, 0.06382978723404255, 0.08333333333333333, 0.14285714285714285, 0.06666666666666667, 0.08695652173913043, 0.14285714285714285, 0.06666666666666667, 0.08695652173913043, 0.04878048780487805, 0.14285714285714285, 0.06382978723404255, 0.08333333333333333, 0.14285714285714285, 0.0379746835443038, 0.02631578947368421, 0.02666666666666667, 0.046511627906976744, 0.05, 0.05172413793103448, 0.0547945205479452, 0.05555555555555555, 0.04838709677419355, 0.061224489795918366, 0.06666666666666667, 0.029411764705882353, 0.02564102564102564, 0.04, 0.08333333333333333, 0.06153846153846154, 0.14285714285714285, 0.03896103896103896, 0.14285714285714285, 0.03896103896103896, 0.14285714285714285, 0.038461538461538464, 0.08333333333333333, 0.05405405405405406, 0.08333333333333333, 0.05405405405405406, 0.046511627906976744, 0.14285714285714285, 0.061224489795918366, 0.08, 0.02531645569620253, 0.02631578947368421, 0.17647058823529413, 0.039473684210526314, 0.043478260869565216, 0.14285714285714285, 0.04, 0.14285714285714285, 0.038461538461538464, 0.14285714285714285, 0.06666666666666667, 0.0851063829787234, 0.027777777777777776, 0.029411764705882353, 0.05454545454545454, 0.07547169811320754, 0, 0.03571428571428571, 0.05405405405405406, 0.0273972602739726, 0.0273972602739726, 0.0273972602739726, 0.0273972602739726, 0.025974025974025976, 0, 0.029850746268656716, 0.030303030303030304, 0.05555555555555555, 0.06451612903225806, 0.0625, 0, 0.07142857142857142, 0.04225352112676056, 0.038461538461538464, 0.07692307692307693, 0.0847457627118644, 0.07142857142857142, 0.07936507936507936, 0.04225352112676056, 0.07692307692307693, 0.07352941176470588, 0, 0.1111111111111111, 0.06666666666666667, 0.05263157894736842, 0.09375, 0.045454545454545456, 0.05660377358490566, 0.07017543859649122, 0.08196721311475409, 0, 0.05, 0.05405405405405406, 0.05084745762711865, 0.07575757575757576, 0, 0.1, 0.05128205128205128, 0.06060606060606061, 0.025974025974025976, 0, 0.14285714285714285 ]
def up(self) -> "InstanceNode":
    """Return an instance node corresponding to the receiver's parent.

    Raises:
        NonexistentInstance: If there is no parent.
    """
    ts = max(self.timestamp, self.parinst.timestamp)
    return self.parinst._copy(self._zip(), ts)
[ "def", "up", "(", "self", ")", "->", "\"InstanceNode\"", ":", "ts", "=", "max", "(", "self", ".", "timestamp", ",", "self", ".", "parinst", ".", "timestamp", ")", "return", "self", ".", "parinst", ".", "_copy", "(", "self", ".", "_zip", "(", ")", ",", "ts", ")" ]
36.5
[ 0.03225806451612903, 0.02702702702702703, 0, 0.13333333333333333, 0.03636363636363636, 0.18181818181818182, 0.03571428571428571, 0.04 ]
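A brief usage sketch of the record above (illustrative only: `inst` and the `NonexistentInstance` exception are assumed from the surrounding library, not shown in this record):
try:
    parent = inst.up()
except NonexistentInstance:
    parent = None  # `inst` was the root node and has no parent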
def make_json_formatted_for_single_chart(mutant_features,
                                         inference_result_proto,
                                         index_to_mutate):
  """Returns JSON formatted for a single mutant chart.

  Args:
    mutant_features: An iterable of `MutantFeatureValue`s representing the
      X-axis.
    inference_result_proto: A ClassificationResponse or RegressionResponse
      returned by Servo, representing the Y-axis. It contains one
      'classification' or 'regression' for every Example that was sent for
      inference. The length of that field should be the same as the length
      of mutant_features.
    index_to_mutate: The index of the feature being mutated for this chart.

  Returns:
    A JSON-able dict for rendering a single mutant chart, parseable by
    `vz-line-chart` or `vz-bar-chart`.
  """
  x_label = 'step'
  y_label = 'scalar'
  if isinstance(inference_result_proto,
                classification_pb2.ClassificationResponse):
    # classification_label -> [{x_label: y_label:}]
    series = {}

    # ClassificationResponse has a separate probability for each label
    for idx, classification in enumerate(
        inference_result_proto.result.classifications):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]

      for class_index, classification_class in enumerate(
          classification.classes):
        # Fill in class index when labels are missing
        if classification_class.label == '':
          classification_class.label = str(class_index)
        # Special case to not include the "0" class in binary classification.
        # Since that just results in a chart that is symmetric around 0.5.
        if len(
            classification.classes) == 2 and classification_class.label == '0':
          continue
        key = classification_class.label
        if index_to_mutate:
          key += ' (index %d)' % index_to_mutate
        if not key in series:
          series[key] = {}
        if not mutant_feature.mutant_value in series[key]:
          series[key][mutant_feature.mutant_value] = []
        series[key][mutant_feature.mutant_value].append(
            classification_class.score)

    # Post-process points to have separate list for each class
    return_series = collections.defaultdict(list)
    for key, mutant_values in iteritems(series):
      for value, y_list in iteritems(mutant_values):
        return_series[key].append({
            x_label: value,
            y_label: sum(y_list) / float(len(y_list))
        })
      return_series[key].sort(key=lambda p: p[x_label])
    return return_series

  elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
    points = {}
    for idx, regression in enumerate(inference_result_proto.result.regressions):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]
      if not mutant_feature.mutant_value in points:
        points[mutant_feature.mutant_value] = []
      points[mutant_feature.mutant_value].append(regression.value)

    key = 'value'
    if (index_to_mutate != 0):
      key += ' (index %d)' % index_to_mutate
    list_of_points = []
    for value, y_list in iteritems(points):
      list_of_points.append({
          x_label: value,
          y_label: sum(y_list) / float(len(y_list))
      })
    list_of_points.sort(key=lambda p: p[x_label])
    return {key: list_of_points}

  else:
    raise NotImplementedError('Only classification and regression implemented.')
[ "def", "make_json_formatted_for_single_chart", "(", "mutant_features", ",", "inference_result_proto", ",", "index_to_mutate", ")", ":", "x_label", "=", "'step'", "y_label", "=", "'scalar'", "if", "isinstance", "(", "inference_result_proto", ",", "classification_pb2", ".", "ClassificationResponse", ")", ":", "# classification_label -> [{x_label: y_label:}]", "series", "=", "{", "}", "# ClassificationResponse has a separate probability for each label", "for", "idx", ",", "classification", "in", "enumerate", "(", "inference_result_proto", ".", "result", ".", "classifications", ")", ":", "# For each example to use for mutant inference, we create a copied example", "# with the feature in question changed to each possible mutant value. So", "# when we get the inferences back, we get num_examples*num_mutants", "# results. So, modding by len(mutant_features) allows us to correctly", "# lookup the mutant value for each inference.", "mutant_feature", "=", "mutant_features", "[", "idx", "%", "len", "(", "mutant_features", ")", "]", "for", "class_index", ",", "classification_class", "in", "enumerate", "(", "classification", ".", "classes", ")", ":", "# Fill in class index when labels are missing", "if", "classification_class", ".", "label", "==", "''", ":", "classification_class", ".", "label", "=", "str", "(", "class_index", ")", "# Special case to not include the \"0\" class in binary classification.", "# Since that just results in a chart that is symmetric around 0.5.", "if", "len", "(", "classification", ".", "classes", ")", "==", "2", "and", "classification_class", ".", "label", "==", "'0'", ":", "continue", "key", "=", "classification_class", ".", "label", "if", "index_to_mutate", ":", "key", "+=", "' (index %d)'", "%", "index_to_mutate", "if", "not", "key", "in", "series", ":", "series", "[", "key", "]", "=", "{", "}", "if", "not", "mutant_feature", ".", "mutant_value", "in", "series", "[", "key", "]", ":", "series", "[", "key", "]", "[", "mutant_feature", ".", "mutant_value", "]", "=", "[", "]", "series", "[", "key", "]", "[", "mutant_feature", ".", "mutant_value", "]", ".", "append", "(", "classification_class", ".", "score", ")", "# Post-process points to have separate list for each class", "return_series", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "key", ",", "mutant_values", "in", "iteritems", "(", "series", ")", ":", "for", "value", ",", "y_list", "in", "iteritems", "(", "mutant_values", ")", ":", "return_series", "[", "key", "]", ".", "append", "(", "{", "x_label", ":", "value", ",", "y_label", ":", "sum", "(", "y_list", ")", "/", "float", "(", "len", "(", "y_list", ")", ")", "}", ")", "return_series", "[", "key", "]", ".", "sort", "(", "key", "=", "lambda", "p", ":", "p", "[", "x_label", "]", ")", "return", "return_series", "elif", "isinstance", "(", "inference_result_proto", ",", "regression_pb2", ".", "RegressionResponse", ")", ":", "points", "=", "{", "}", "for", "idx", ",", "regression", "in", "enumerate", "(", "inference_result_proto", ".", "result", ".", "regressions", ")", ":", "# For each example to use for mutant inference, we create a copied example", "# with the feature in question changed to each possible mutant value. So", "# when we get the inferences back, we get num_examples*num_mutants", "# results. 
So, modding by len(mutant_features) allows us to correctly", "# lookup the mutant value for each inference.", "mutant_feature", "=", "mutant_features", "[", "idx", "%", "len", "(", "mutant_features", ")", "]", "if", "not", "mutant_feature", ".", "mutant_value", "in", "points", ":", "points", "[", "mutant_feature", ".", "mutant_value", "]", "=", "[", "]", "points", "[", "mutant_feature", ".", "mutant_value", "]", ".", "append", "(", "regression", ".", "value", ")", "key", "=", "'value'", "if", "(", "index_to_mutate", "!=", "0", ")", ":", "key", "+=", "' (index %d)'", "%", "index_to_mutate", "list_of_points", "=", "[", "]", "for", "value", ",", "y_list", "in", "iteritems", "(", "points", ")", ":", "list_of_points", ".", "append", "(", "{", "x_label", ":", "value", ",", "y_label", ":", "sum", "(", "y_list", ")", "/", "float", "(", "len", "(", "y_list", ")", ")", "}", ")", "list_of_points", ".", "sort", "(", "key", "=", "lambda", "p", ":", "p", "[", "x_label", "]", ")", "return", "{", "key", ":", "list_of_points", "}", "else", ":", "raise", "NotImplementedError", "(", "'Only classification and regression implemented.'", ")" ]
43.659574
[ 0.03508771929824561, 0.046875, 0.06896551724137931, 0.037037037037037035, 0, 0.42857142857142855, 0.04054054054054054, 0.23076923076923078, 0.02702702702702703, 0.061224489795918366, 0.03896103896103896, 0.05, 0.12, 0.02666666666666667, 0, 0.3, 0.02857142857142857, 0.07894736842105263, 0.4, 0.16666666666666666, 0.15, 0, 0.10256410256410256, 0.05084745762711865, 0.0392156862745098, 0.13333333333333333, 0, 0.02857142857142857, 0.07317073170731707, 0.05454545454545454, 0.05, 0.038461538461538464, 0.041666666666666664, 0.04, 0.058823529411764705, 0.045454545454545456, 0.07017543859649122, 0.09375, 0.03773584905660377, 0.045454545454545456, 0.05454545454545454, 0.025974025974025976, 0.02702702702702703, 0.2, 0.0379746835443038, 0.16666666666666666, 0.05, 0.07407407407407407, 0.0625, 0.10344827586206896, 0.11538461538461539, 0.05172413793103448, 0.05454545454545454, 0.05357142857142857, 0.10810810810810811, 0, 0.03225806451612903, 0.04081632653061224, 0.041666666666666664, 0.057692307692307696, 0.08571428571428572, 0.12, 0.058823529411764705, 0.3, 0.05454545454545454, 0.08333333333333333, 0, 0.03896103896103896, 0.13333333333333333, 0, 0.0375, 0.05, 0.038461538461538464, 0.041666666666666664, 0.04, 0.058823529411764705, 0.045454545454545456, 0.0784313725490196, 0.041666666666666664, 0.045454545454545456, 0.11764705882352941, 0.06666666666666667, 0.06818181818181818, 0.08695652173913043, 0.046511627906976744, 0.13793103448275862, 0.08695652173913043, 0.04081632653061224, 0.5, 0.04081632653061224, 0.0625, 0, 0.42857142857142855, 0.0375 ]
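A standalone sketch of just the post-processing step above: y-values collected per mutant x-value are averaged and the points sorted by x (all names here are illustrative, not part of the original API):
points = {0.0: [0.2, 0.4], 1.0: [0.9, 0.7]}
series = [{'step': x, 'scalar': sum(ys) / float(len(ys))}
          for x, ys in points.items()]
series.sort(key=lambda p: p['step'])
# -> [{'step': 0.0, 'scalar': 0.3}, {'step': 1.0, 'scalar': 0.8}]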
def _find_by_nsp(self, browser, criteria, tag, constraints):
    """Find element matches by iOSNsPredicateString."""
    return self._filter_elements(
        browser.find_elements_by_ios_predicate(criteria),
        tag, constraints)
[ "def", "_find_by_nsp", "(", "self", ",", "browser", ",", "criteria", ",", "tag", ",", "constraints", ")", ":", "return", "self", ".", "_filter_elements", "(", "browser", ".", "find_elements_by_ios_predicate", "(", "criteria", ")", ",", "tag", ",", "constraints", ")" ]
50.2
[ 0.01639344262295082, 0.01639344262295082, 0.05263157894736842, 0.03225806451612903, 0.10344827586206896 ]
def send_discover(self):
    """Send discover."""
    assert self.client
    assert self.current_state == STATE_INIT or \
        self.current_state == STATE_SELECTING
    pkt = self.client.gen_discover()
    sendp(pkt)
    # FIXME:20 check that this is correct,: all or only discover?
    if self.discover_attempts < MAX_ATTEMPTS_DISCOVER:
        self.discover_attempts += 1
    timeout = gen_timeout_resend(self.discover_attempts)
    self.set_timeout(self.current_state,
                     self.timeout_selecting,
                     timeout)
[ "def", "send_discover", "(", "self", ")", ":", "assert", "self", ".", "client", "assert", "self", ".", "current_state", "==", "STATE_INIT", "or", "self", ".", "current_state", "==", "STATE_SELECTING", "pkt", "=", "self", ".", "client", ".", "gen_discover", "(", ")", "sendp", "(", "pkt", ")", "# FIXME:20 check that this is correct,: all or only discover?", "if", "self", ".", "discover_attempts", "<", "MAX_ATTEMPTS_DISCOVER", ":", "self", ".", "discover_attempts", "+=", "1", "timeout", "=", "gen_timeout_resend", "(", "self", ".", "discover_attempts", ")", "self", ".", "set_timeout", "(", "self", ".", "current_state", ",", "self", ".", "timeout_selecting", ",", "timeout", ")" ]
42
[ 0.041666666666666664, 0.07142857142857142, 0.07692307692307693, 0.038461538461538464, 0.04081632653061224, 0.05, 0.1111111111111111, 0.028985507246376812, 0.034482758620689655, 0.05128205128205128, 0.03333333333333333, 0.06818181818181818, 0.0625, 0.12121212121212122 ]
def get_function_in_models(service, operation):
    """refers to definition of API in botocore, and autogenerates function
    You can see example of elbv2 from link below.
      https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    inputs = op_model.input_shape.members
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    if input_names:
        body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
    else:
        body = 'def {}(self):\n'.format(operation)
    body += ' # implement here\n'
    body += ' return {}\n\n'.format(', '.join(output_names))

    return body
[ "def", "get_function_in_models", "(", "service", ",", "operation", ")", ":", "client", "=", "boto3", ".", "client", "(", "service", ")", "aws_operation_name", "=", "to_upper_camel_case", "(", "operation", ")", "op_model", "=", "client", ".", "_service_model", ".", "operation_model", "(", "aws_operation_name", ")", "inputs", "=", "op_model", ".", "input_shape", ".", "members", "if", "not", "hasattr", "(", "op_model", ".", "output_shape", ",", "'members'", ")", ":", "outputs", "=", "{", "}", "else", ":", "outputs", "=", "op_model", ".", "output_shape", ".", "members", "input_names", "=", "[", "to_snake_case", "(", "_", ")", "for", "_", "in", "inputs", ".", "keys", "(", ")", "if", "_", "not", "in", "INPUT_IGNORED_IN_BACKEND", "]", "output_names", "=", "[", "to_snake_case", "(", "_", ")", "for", "_", "in", "outputs", ".", "keys", "(", ")", "if", "_", "not", "in", "OUTPUT_IGNORED_IN_BACKEND", "]", "if", "input_names", ":", "body", "=", "'def {}(self, {}):\\n'", ".", "format", "(", "operation", ",", "', '", ".", "join", "(", "input_names", ")", ")", "else", ":", "body", "=", "'def {}(self)\\n'", "body", "+=", "' # implement here\\n'", "body", "+=", "' return {}\\n\\n'", ".", "format", "(", "', '", ".", "join", "(", "output_names", ")", ")", "return", "body" ]
45.695652
[ 0.02127659574468085, 0.02702702702702703, 0.04081632653061224, 0.05154639175257732, 0.2857142857142857, 0.058823529411764705, 0.03636363636363636, 0.027777777777777776, 0.04878048780487805, 0.03773584905660377, 0.1, 0.2222222222222222, 0.0425531914893617, 0.03125, 0.030303030303030304, 0.10526315789473684, 0.02564102564102564, 0.2222222222222222, 0.06451612903225806, 0.05555555555555555, 0.031746031746031744, 0, 0.13333333333333333 ]
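A hypothetical invocation (requires boto3 with its bundled botocore service data; the service and operation names below are examples only):
stub = get_function_in_models('elbv2', 'describe_load_balancers')
print(stub)  # prints a "def describe_load_balancers(self, ...):" skeleton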
def division_content(self, election_day, division, special=False):
    """
    Return serialized content for a division page.
    """
    from electionnight.models import PageType

    division_type = ContentType.objects.get_for_model(division)
    page_type = PageType.objects.get(
        model_type=division_type,
        election_day=election_day,
        division_level=division.level,
    )
    page_content = self.get(
        content_type__pk=division_type.pk,
        object_id=division.pk,
        election_day=election_day,
        special_election=special,
    )
    page_type_content = self.get(
        content_type=ContentType.objects.get_for_model(page_type),
        object_id=page_type.pk,
        election_day=election_day,
    )
    return {
        "site": self.site_content(election_day)["site"],
        "page_type": self.serialize_content_blocks(page_type_content),
        "page": self.serialize_content_blocks(page_content),
    }
[ "def", "division_content", "(", "self", ",", "election_day", ",", "division", ",", "special", "=", "False", ")", ":", "from", "electionnight", ".", "models", "import", "PageType", "division_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "division", ")", "page_type", "=", "PageType", ".", "objects", ".", "get", "(", "model_type", "=", "division_type", ",", "election_day", "=", "election_day", ",", "division_level", "=", "division", ".", "level", ",", ")", "page_content", "=", "self", ".", "get", "(", "content_type__pk", "=", "division_type", ".", "pk", ",", "object_id", "=", "division", ".", "pk", ",", "election_day", "=", "election_day", ",", "special_election", "=", "special", ",", ")", "page_type_content", "=", "self", ".", "get", "(", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "page_type", ")", ",", "object_id", "=", "page_type", ".", "pk", ",", "election_day", "=", "election_day", ",", ")", "return", "{", "\"site\"", ":", "self", ".", "site_content", "(", "election_day", ")", "[", "\"site\"", "]", ",", "\"page_type\"", ":", "self", ".", "serialize_content_blocks", "(", "page_type_content", ")", ",", "\"page\"", ":", "self", ".", "serialize_content_blocks", "(", "page_content", ")", ",", "}" ]
36.892857
[ 0.015151515151515152, 0.18181818181818182, 0.037037037037037035, 0.18181818181818182, 0.04081632653061224, 0, 0.029850746268656716, 0.07317073170731707, 0.08108108108108109, 0.07894736842105263, 0.07142857142857142, 0.3333333333333333, 0.09375, 0.06521739130434782, 0.08823529411764706, 0.07894736842105263, 0.08108108108108109, 0.3333333333333333, 0.08108108108108109, 0.04285714285714286, 0.08571428571428572, 0.07894736842105263, 0.3333333333333333, 0.1875, 0.03333333333333333, 0.02702702702702703, 0.03125, 0.3333333333333333 ]
def MeshViewers(
        shape=(1, 1), titlebar="Mesh Viewers", keepalive=False,
        window_width=1280, window_height=960):
    """Allows subplot-style inspection of primitives in multiple subwindows.

    Args:
        shape: a tuple indicating the number of vertical and horizontal windows requested

    Returns:
        a list of lists of MeshViewer objects: one per window requested.
    """
    if not test_for_opengl():
        return Dummy()

    mv = MeshViewerLocal(
        shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive,
        window_width=window_width, window_height=window_height)
    return mv.get_subwindows()
[ "def", "MeshViewers", "(", "shape", "=", "(", "1", ",", "1", ")", ",", "titlebar", "=", "\"Mesh Viewers\"", ",", "keepalive", "=", "False", ",", "window_width", "=", "1280", ",", "window_height", "=", "960", ")", ":", "if", "not", "test_for_opengl", "(", ")", ":", "return", "Dummy", "(", ")", "mv", "=", "MeshViewerLocal", "(", "shape", "=", "shape", ",", "titlebar", "=", "titlebar", ",", "uid", "=", "None", ",", "keepalive", "=", "keepalive", ",", "window_width", "=", "window_width", ",", "window_height", "=", "window_height", ")", "return", "mv", ".", "get_subwindows", "(", ")" ]
34.777778
[ 0.125, 0.07936507936507936, 0.09090909090909091, 1, 0.02631578947368421, 0, 0.2222222222222222, 0.033707865168539325, 0, 0.025974025974025976, 0.2857142857142857, 0.06896551724137931, 0.09090909090909091, 0.12, 0.08571428571428572, 0.06451612903225806, 0.6, 0.06666666666666667 ]
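Minimal usage sketch following the docstring (what you do with each subwindow depends on the rest of the MeshViewer API, which is not shown in this record):
viewers = MeshViewers(shape=(2, 2))  # 2x2 grid of subwindows
top_right = viewers[0][1]            # returned as a list of lists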
def access_token(self):
    ''' a method to acquire an oauth access token '''

    title = '%s.access_token' % self.__class__.__name__

    # import dependencies
    from time import time
    import requests

    # construct request kwargs
    request_kwargs = {
        'url': self.token_endpoint,
        'data': {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'grant_type': 'client_credentials'
        }
    }

    # send request
    try:
        current_time = time()
        response = requests.post(**request_kwargs)
    except Exception:
        if self.requests_handler:
            request_kwargs['method'] = 'POST'
            request_object = requests.Request(**request_kwargs)
            return self.requests_handler(request_object)
        else:
            raise

    response_details = self.response_handler.handle(response)

    if response_details['json']:
        self._access_token = response_details['json']['access_token']
        expires_in = response_details['json']['expires_in']
        self.expires_at = current_time + expires_in

    return self._access_token
[ "def", "access_token", "(", "self", ")", ":", "title", "=", "'%s.access_token'", "%", "self", ".", "__class__", ".", "__name__", "# import dependencies", "from", "time", "import", "time", "import", "requests", "# construct request kwargs", "request_kwargs", "=", "{", "'url'", ":", "self", ".", "token_endpoint", ",", "'data'", ":", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", ",", "'grant_type'", ":", "'client_credentials'", "}", "}", "# send request", "try", ":", "current_time", "=", "time", "(", ")", "response", "=", "requests", ".", "post", "(", "*", "*", "request_kwargs", ")", "except", "Exception", ":", "if", "self", ".", "requests_handler", ":", "request_kwargs", "[", "'method'", "]", "=", "'POST'", "request_object", "=", "requests", ".", "Request", "(", "*", "*", "request_kwargs", ")", "return", "self", ".", "requests_handler", "(", "request_object", ")", "else", ":", "raise", "response_details", "=", "self", ".", "response_handler", ".", "handle", "(", "response", ")", "if", "response_details", "[", "'json'", "]", ":", "self", ".", "_access_token", "=", "response_details", "[", "'json'", "]", "[", "'access_token'", "]", "expires_in", "=", "response_details", "[", "'json'", "]", "[", "'expires_in'", "]", "self", ".", "expires_at", "=", "current_time", "+", "expires_in", "return", "self", ".", "_access_token" ]
30.85
[ 0.043478260869565216, 0, 0.03508771929824561, 0, 0.03389830508474576, 0.5, 0.08, 0.06896551724137931, 0.08695652173913043, 0, 0.06666666666666667, 0.11538461538461539, 0.05128205128205128, 0.14285714285714285, 0.045454545454545456, 0.038461538461538464, 0.04, 0.23076923076923078, 0.3333333333333333, 0, 0.1111111111111111, 0.16666666666666666, 0.06060606060606061, 0.037037037037037035, 0.08, 0.05405405405405406, 0.04081632653061224, 0.029850746268656716, 0.03333333333333333, 0.11764705882352941, 0.09523809523809523, 0, 0.03076923076923077, 0, 0.05555555555555555, 0.0273972602739726, 0.031746031746031744, 0.03636363636363636, 0.16666666666666666, 0.06060606060606061 ]
def debugExperiment(logFile):
  """
  Debug a thing experiment given a logFile
  """
  exp = rerunExperimentFromLogfile(logFile)
  exp.logCalls = False

  L2Representations = exp.objectL2Representations
  print "Learned object representations:"
  pprint.pprint(L2Representations, width=400)
  print "=========================="

  print "\nRun inference with a point on the capsule"
  sensationList = [
    {0: getObjectPair("Capsule", 0)},
  ]
  exp.infer(sensationList, reset=False)
  print "Output for capsule:", exp.getL2Representations()
  print "Intersection with sphere:", len(
    exp.getL2Representations()[0] & L2Representations["Sphere"][0])
  print "Intersection with capsule:", len(
    exp.getL2Representations()[0] & L2Representations["Capsule"][0])
  print "Intersection with cube:", len(
    exp.getL2Representations()[0] & L2Representations["Cube"][0])
  exp.sendReset()

  print "\nRun inference with a point on the sphere"
  sensationList = [
    {0: getObjectPair("Sphere", 0)},
  ]
  exp.infer(sensationList, reset=False)
  print "Output for sphere:", exp.getL2Representations()
  print "Intersection with sphere:", len(
    exp.getL2Representations()[0] & L2Representations["Sphere"][0])
  print "Intersection with Capsule:", len(
    exp.getL2Representations()[0] & L2Representations["Capsule"][0])
  print "Intersection with cube:", len(
    exp.getL2Representations()[0] & L2Representations["Cube"][0])
  exp.sendReset()

  print "\nRun inference with two points on the sphere"
  sensationList = [
    {0: getObjectPair("Sphere", 0)},
    {0: getObjectPair("Sphere", 2)},
  ]
  exp.infer(sensationList, reset=False)
  print "Output for sphere:", exp.getL2Representations()
  print "Intersection with sphere:", len(
    exp.getL2Representations()[0] & L2Representations["Sphere"][0])
  print "Intersection with Capsule:", len(
    exp.getL2Representations()[0] & L2Representations["Capsule"][0])
  print "Intersection with cube:", len(
    exp.getL2Representations()[0] & L2Representations["Cube"][0])
  exp.sendReset()

  print "\nRun inference with a point on the cube"
  sensationList = [
    {0: getObjectPair("Cube", 2)},
  ]
  exp.infer(sensationList, reset=False)
  print "Output for cube:", exp.getL2Representations()
  print "Intersection with sphere:", len(
    exp.getL2Representations()[0] & L2Representations["Sphere"][0])
  print "Intersection with Capsule:", len(
    exp.getL2Representations()[0] & L2Representations["Capsule"][0])
  print "Intersection with cube:", len(
    exp.getL2Representations()[0] & L2Representations["Cube"][0])
  exp.sendReset()
[ "def", "debugExperiment", "(", "logFile", ")", ":", "exp", "=", "rerunExperimentFromLogfile", "(", "logFile", ")", "exp", ".", "logCalls", "=", "False", "L2Representations", "=", "exp", ".", "objectL2Representations", "print", "\"Learned object representations:\"", "pprint", ".", "pprint", "(", "L2Representations", ",", "width", "=", "400", ")", "print", "\"==========================\"", "print", "\"\\nRun inference with a point on the capsule\"", "sensationList", "=", "[", "{", "0", ":", "getObjectPair", "(", "\"Capsule\"", ",", "0", ")", "}", ",", "]", "exp", ".", "infer", "(", "sensationList", ",", "reset", "=", "False", ")", "print", "\"Output for capsule:\"", ",", "exp", ".", "getL2Representations", "(", ")", "print", "\"Intersection with sphere:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Sphere\"", "]", "[", "0", "]", ")", "print", "\"Intersection with capsule:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Capsule\"", "]", "[", "0", "]", ")", "print", "\"Intersection with cube:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Cube\"", "]", "[", "0", "]", ")", "exp", ".", "sendReset", "(", ")", "print", "\"\\nRun inference with a point on the sphere\"", "sensationList", "=", "[", "{", "0", ":", "getObjectPair", "(", "\"Sphere\"", ",", "0", ")", "}", ",", "]", "exp", ".", "infer", "(", "sensationList", ",", "reset", "=", "False", ")", "print", "\"Output for sphere:\"", ",", "exp", ".", "getL2Representations", "(", ")", "print", "\"Intersection with sphere:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Sphere\"", "]", "[", "0", "]", ")", "print", "\"Intersection with Capsule:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Capsule\"", "]", "[", "0", "]", ")", "print", "\"Intersection with cube:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Cube\"", "]", "[", "0", "]", ")", "exp", ".", "sendReset", "(", ")", "print", "\"\\nRun inference with two points on the sphere\"", "sensationList", "=", "[", "{", "0", ":", "getObjectPair", "(", "\"Sphere\"", ",", "0", ")", "}", ",", "{", "0", ":", "getObjectPair", "(", "\"Sphere\"", ",", "2", ")", "}", ",", "]", "exp", ".", "infer", "(", "sensationList", ",", "reset", "=", "False", ")", "print", "\"Output for sphere:\"", ",", "exp", ".", "getL2Representations", "(", ")", "print", "\"Intersection with sphere:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Sphere\"", "]", "[", "0", "]", ")", "print", "\"Intersection with Capsule:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Capsule\"", "]", "[", "0", "]", ")", "print", "\"Intersection with cube:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Cube\"", "]", "[", "0", "]", ")", "exp", ".", "sendReset", "(", ")", "print", "\"\\nRun inference with a point on the cube\"", "sensationList", "=", "[", "{", "0", ":", "getObjectPair", "(", "\"Cube\"", ",", "2", ")", "}", ",", "]", "exp", ".", "infer", "(", "sensationList", ",", "reset", "=", "False", ")", "print", "\"Output for cube:\"", ",", 
"exp", ".", "getL2Representations", "(", ")", "print", "\"Intersection with sphere:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Sphere\"", "]", "[", "0", "]", ")", "print", "\"Intersection with Capsule:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Capsule\"", "]", "[", "0", "]", ")", "print", "\"Intersection with cube:\"", ",", "len", "(", "exp", ".", "getL2Representations", "(", ")", "[", "0", "]", "&", "L2Representations", "[", "\"Cube\"", "]", "[", "0", "]", ")", "exp", ".", "sendReset", "(", ")" ]
36.185714
[ 0.034482758620689655, 0.4, 0.07142857142857142, 0.4, 0, 0.06976744186046512, 0.13636363636363635, 0, 0.061224489795918366, 0.07317073170731707, 0.06666666666666667, 0.08333333333333333, 0, 0.05660377358490566, 0.21052631578947367, 0.05405405405405406, 1.3333333333333333, 0.1, 0.05263157894736842, 0.0975609756097561, 0.04477611940298507, 0.09523809523809523, 0.04411764705882353, 0.10256410256410256, 0.046153846153846156, 0.17647058823529413, 0, 0.057692307692307696, 0.21052631578947367, 0.05555555555555555, 1.3333333333333333, 0.1, 0.05357142857142857, 0.0975609756097561, 0.04477611940298507, 0.09523809523809523, 0.04411764705882353, 0.10256410256410256, 0.046153846153846156, 0.17647058823529413, 0, 0.05454545454545454, 0.21052631578947367, 0.05555555555555555, 0.05555555555555555, 1.3333333333333333, 0.1, 0.05357142857142857, 0.0975609756097561, 0.04477611940298507, 0.09523809523809523, 0.04411764705882353, 0.10256410256410256, 0.046153846153846156, 0.17647058823529413, 0, 0, 0.06, 0.21052631578947367, 0.058823529411764705, 1.3333333333333333, 0.1, 0.05555555555555555, 0.0975609756097561, 0.04477611940298507, 0.09523809523809523, 0.04411764705882353, 0.10256410256410256, 0.046153846153846156, 0.17647058823529413 ]
def get_field_groups(layer_purpose, layer_subcategory=None):
    """Obtain list of field groups from layer purpose and subcategory.

    :param layer_purpose: The layer purpose.
    :type layer_purpose: str

    :param layer_subcategory: Exposure or hazard value.
    :type layer_subcategory: str

    :returns: List of layer groups.
    :rtype: list
    """
    layer_purpose_dict = definition(layer_purpose)
    if not layer_purpose_dict:
        return []
    field_groups = deepcopy(layer_purpose_dict.get('field_groups', []))
    if layer_purpose in [
            layer_purpose_exposure['key'], layer_purpose_hazard['key']]:
        if layer_subcategory:
            subcategory = definition(layer_subcategory)
            if 'field_groups' in subcategory:
                field_groups += deepcopy(subcategory['field_groups'])
    return field_groups
[ "def", "get_field_groups", "(", "layer_purpose", ",", "layer_subcategory", "=", "None", ")", ":", "layer_purpose_dict", "=", "definition", "(", "layer_purpose", ")", "if", "not", "layer_purpose_dict", ":", "return", "[", "]", "field_groups", "=", "deepcopy", "(", "layer_purpose_dict", ".", "get", "(", "'field_groups'", ",", "[", "]", ")", ")", "if", "layer_purpose", "in", "[", "layer_purpose_exposure", "[", "'key'", "]", ",", "layer_purpose_hazard", "[", "'key'", "]", "]", ":", "if", "layer_subcategory", ":", "subcategory", "=", "definition", "(", "layer_subcategory", ")", "if", "'field_groups'", "in", "subcategory", ":", "field_groups", "+=", "deepcopy", "(", "subcategory", "[", "'field_groups'", "]", ")", "return", "field_groups" ]
36.217391
[ 0.016666666666666666, 0.02857142857142857, 0, 0.06818181818181818, 0.10714285714285714, 0, 0.05454545454545454, 0.09375, 0, 0.08571428571428572, 0.1875, 0.2857142857142857, 0.04, 0.06666666666666667, 0.11764705882352941, 0.028169014084507043, 0.12, 0.041666666666666664, 0.06896551724137931, 0.03636363636363636, 0.044444444444444446, 0.028985507246376812, 0.08695652173913043 ]
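An illustrative call, assuming the InaSAFE definitions referenced above ('flood' as a hazard subcategory key is an assumption, not taken from this record):
groups = get_field_groups(layer_purpose_hazard['key'], 'flood')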
def output_vlan(gandi, vlan, datacenters, output_keys, justify=10):
    """ Helper to output vlan information."""
    output_generic(gandi, vlan, output_keys, justify)

    if 'dc' in output_keys:
        for dc in datacenters:
            if dc['id'] == vlan.get('datacenter_id',
                                    vlan.get('datacenter', {}).get('id')):
                dc_name = dc.get('dc_code', dc.get('iso', ''))
                break

        output_line(gandi, 'datacenter', dc_name, justify)
[ "def", "output_vlan", "(", "gandi", ",", "vlan", ",", "datacenters", ",", "output_keys", ",", "justify", "=", "10", ")", ":", "output_generic", "(", "gandi", ",", "vlan", ",", "output_keys", ",", "justify", ")", "if", "'dc'", "in", "output_keys", ":", "for", "dc", "in", "datacenters", ":", "if", "dc", "[", "'id'", "]", "==", "vlan", ".", "get", "(", "'datacenter_id'", ",", "vlan", ".", "get", "(", "'datacenter'", ",", "{", "}", ")", ".", "get", "(", "'id'", ")", ")", ":", "dc_name", "=", "dc", ".", "get", "(", "'dc_code'", ",", "dc", ".", "get", "(", "'iso'", ",", "''", ")", ")", "break", "output_line", "(", "gandi", ",", "'datacenter'", ",", "dc_name", ",", "justify", ")" ]
40.916667
[ 0.014925373134328358, 0.0425531914893617, 0.03773584905660377, 0, 0.07407407407407407, 0.06666666666666667, 0.057692307692307696, 0.04054054054054054, 0.03225806451612903, 0.09523809523809523, 0, 0.034482758620689655 ]
def visit_Expr(self, node: AST, dfltChaining: bool = True) -> str:
    """Return representation of nested expression."""
    return self.visit(node.value)
[ "def", "visit_Expr", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "return", "self", ".", "visit", "(", "node", ".", "value", ")" ]
53.333333
[ 0.015151515151515152, 0.03508771929824561, 0.05405405405405406 ]
def _add_cxnSp(self, connector_type, begin_x, begin_y, end_x, end_y):
    """Return a newly-added `p:cxnSp` element as specified.

    The `p:cxnSp` element is for a connector of *connector_type*
    beginning at (*begin_x*, *begin_y*) and extending to
    (*end_x*, *end_y*).
    """
    id_ = self._next_shape_id
    name = 'Connector %d' % (id_-1)

    flipH, flipV = begin_x > end_x, begin_y > end_y
    x, y = min(begin_x, end_x), min(begin_y, end_y)
    cx, cy = abs(end_x - begin_x), abs(end_y - begin_y)

    return self._element.add_cxnSp(
        id_, name, connector_type, x, y, cx, cy, flipH, flipV
    )
[ "def", "_add_cxnSp", "(", "self", ",", "connector_type", ",", "begin_x", ",", "begin_y", ",", "end_x", ",", "end_y", ")", ":", "id_", "=", "self", ".", "_next_shape_id", "name", "=", "'Connector %d'", "%", "(", "id_", "-", "1", ")", "flipH", ",", "flipV", "=", "begin_x", ">", "end_x", ",", "begin_y", ">", "end_y", "x", ",", "y", "=", "min", "(", "begin_x", ",", "end_x", ")", ",", "min", "(", "begin_y", ",", "end_y", ")", "cx", ",", "cy", "=", "abs", "(", "end_x", "-", "begin_x", ")", ",", "abs", "(", "end_y", "-", "begin_y", ")", "return", "self", ".", "_element", ".", "add_cxnSp", "(", "id_", ",", "name", ",", "connector_type", ",", "x", ",", "y", ",", "cx", ",", "cy", ",", "flipH", ",", "flipV", ")" ]
38.352941
[ 0.014492753623188406, 0.031746031746031744, 0, 0.07352941176470588, 0.05, 0.07407407407407407, 0.18181818181818182, 0.06060606060606061, 0.05128205128205128, 0, 0.03636363636363636, 0.03636363636363636, 0.03389830508474576, 0, 0.07692307692307693, 0.03076923076923077, 0.3333333333333333 ]
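Standalone illustration of the geometry above: a connector drawn from (begin_x, begin_y) to (end_x, end_y) is stored as a bounding box plus horizontal/vertical flip flags:
begin_x, begin_y, end_x, end_y = 400, 300, 100, 500
flipH, flipV = begin_x > end_x, begin_y > end_y      # (True, False)
x, y = min(begin_x, end_x), min(begin_y, end_y)      # (100, 300)
cx, cy = abs(end_x - begin_x), abs(end_y - begin_y)  # (300, 200)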
def calc_stats_iterator(motifs, fg_file, bg_file, genome=None, stats=None, ncpus=None):
    """Calculate motif enrichment metrics.

    Parameters
    ----------
    motifs : str, list or Motif instance
        A file with motifs in pwm format, a list of Motif instances or a
        single Motif instance.

    fg_file : str
        Filename of a FASTA, BED or region file with positive sequences.

    bg_file : str
        Filename of a FASTA, BED or region file with negative sequences.

    genome : str, optional
        Genome or index directory in case of BED/regions.

    stats : list, optional
        Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__
        for available metrics.

    ncpus : int, optional
        Number of cores to use.

    Returns
    -------
    result : dict
        Dictionary with results where keys are motif ids and the values are
        dictionaries with metric name and value pairs.
    """
    if not stats:
        stats = rocmetrics.__all__

    if isinstance(motifs, Motif):
        all_motifs = [motifs]
    else:
        if type([]) == type(motifs):
            all_motifs = motifs
        else:
            all_motifs = read_motifs(motifs, fmt="pwm")

    if ncpus is None:
        ncpus = int(MotifConfig().get_default_params()["ncpus"])

    chunksize = 240
    for i in range(0, len(all_motifs), chunksize):
        result = {}
        logger.debug("chunk %s of %s",
                     (i / chunksize) + 1, len(all_motifs) // chunksize + 1)
        motifs = all_motifs[i:i + chunksize]

        fg_total = scan_to_best_match(fg_file, motifs, ncpus=ncpus, genome=genome)
        bg_total = scan_to_best_match(bg_file, motifs, ncpus=ncpus, genome=genome)

        logger.debug("calculating statistics")
        if ncpus == 1:
            it = _single_stats(motifs, stats, fg_total, bg_total)
        else:
            it = _mp_stats(motifs, stats, fg_total, bg_total, ncpus)
        for motif_id, s, ret in it:
            if motif_id not in result:
                result[motif_id] = {}
            result[motif_id][s] = ret

        yield result
[ "def", "calc_stats_iterator", "(", "motifs", ",", "fg_file", ",", "bg_file", ",", "genome", "=", "None", ",", "stats", "=", "None", ",", "ncpus", "=", "None", ")", ":", "if", "not", "stats", ":", "stats", "=", "rocmetrics", ".", "__all__", "if", "isinstance", "(", "motifs", ",", "Motif", ")", ":", "all_motifs", "=", "[", "motifs", "]", "else", ":", "if", "type", "(", "[", "]", ")", "==", "type", "(", "motifs", ")", ":", "all_motifs", "=", "motifs", "else", ":", "all_motifs", "=", "read_motifs", "(", "motifs", ",", "fmt", "=", "\"pwm\"", ")", "if", "ncpus", "is", "None", ":", "ncpus", "=", "int", "(", "MotifConfig", "(", ")", ".", "get_default_params", "(", ")", "[", "\"ncpus\"", "]", ")", "chunksize", "=", "240", "for", "i", "in", "range", "(", "0", ",", "len", "(", "all_motifs", ")", ",", "chunksize", ")", ":", "result", "=", "{", "}", "logger", ".", "debug", "(", "\"chunk %s of %s\"", ",", "(", "i", "/", "chunksize", ")", "+", "1", ",", "len", "(", "all_motifs", ")", "//", "chunksize", "+", "1", ")", "motifs", "=", "all_motifs", "[", "i", ":", "i", "+", "chunksize", "]", "fg_total", "=", "scan_to_best_match", "(", "fg_file", ",", "motifs", ",", "ncpus", "=", "ncpus", ",", "genome", "=", "genome", ")", "bg_total", "=", "scan_to_best_match", "(", "bg_file", ",", "motifs", ",", "ncpus", "=", "ncpus", ",", "genome", "=", "genome", ")", "logger", ".", "debug", "(", "\"calculating statistics\"", ")", "if", "ncpus", "==", "1", ":", "it", "=", "_single_stats", "(", "motifs", ",", "stats", ",", "fg_total", ",", "bg_total", ")", "else", ":", "it", "=", "_mp_stats", "(", "motifs", ",", "stats", ",", "fg_total", ",", "bg_total", ",", "ncpus", ")", "for", "motif_id", ",", "s", ",", "ret", "in", "it", ":", "if", "motif_id", "not", "in", "result", ":", "result", "[", "motif_id", "]", "=", "{", "}", "result", "[", "motif_id", "]", "[", "s", "]", "=", "ret", "yield", "result" ]
31.119403
[ 0.022988505747126436, 0.047619047619047616, 0, 0.14285714285714285, 0.14285714285714285, 0.075, 0.0410958904109589, 0.06666666666666667, 0, 0.17647058823529413, 0.027777777777777776, 0, 0.17647058823529413, 0.027777777777777776, 0, 0.11538461538461539, 0.03508771929824561, 0.5, 0.11538461538461539, 0.04054054054054054, 0.06666666666666667, 0, 0.12, 0.06451612903225806, 0, 0.18181818181818182, 0.18181818181818182, 0.17647058823529413, 0.02666666666666667, 0.038461538461538464, 0.2857142857142857, 0.11764705882352941, 0.058823529411764705, 0.5, 0.06060606060606061, 0.06896551724137931, 0.2222222222222222, 0.05555555555555555, 0.06451612903225806, 0.15384615384615385, 0.03636363636363636, 0.5, 0.09523809523809523, 0.03125, 0.10526315789473684, 0, 0.04, 0.10526315789473684, 0.07894736842105263, 0.045454545454545456, 0.045454545454545456, 0.2857142857142857, 0.036585365853658534, 0.036585365853658534, 0.4, 0.043478260869565216, 0.25, 0.09090909090909091, 0.045454545454545456, 0.15384615384615385, 0.043478260869565216, 0.25, 0.05714285714285714, 0.05263157894736842, 0.05405405405405406, 0.05405405405405406, 0.1 ]
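Sketch of consuming the iterator (the file names are placeholders and 'roc_auc' is only an example metric name; gimmemotifs and its data files are required for a real run):
stats = {}
for chunk in calc_stats_iterator('motifs.pwm', 'fg.fa', 'bg.fa',
                                 genome='hg38', stats=['roc_auc']):
    stats.update(chunk)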
def remove_menu(self, menu):
    """
    Removes a sub-menu from the context menu.

    :param menu: Sub-menu to remove.
    """
    self._menus.remove(menu)
    for action in menu.actions():
        self.removeAction(action)
[ "def", "remove_menu", "(", "self", ",", "menu", ")", ":", "self", ".", "_menus", ".", "remove", "(", "menu", ")", "for", "action", "in", "menu", ".", "actions", "(", ")", ":", "self", ".", "removeAction", "(", "action", ")" ]
30.625
[ 0.03571428571428571, 0.18181818181818182, 0.04081632653061224, 0.075, 0.18181818181818182, 0.0625, 0.05405405405405406, 0.05405405405405406 ]
def force_seek(fd, offset, chunk=CHUNK):
    """ Force adjustment of read cursor to specified offset

    This function takes a file descriptor ``fd`` and tries to seek to
    position specified by ``offset`` argument. If the descriptor does not
    support the ``seek()`` method, it will fall back to ``emulate_seek()``.

    The optional ``chunk`` argument can be used to adjust the chunk size
    for ``emulate_seek()``.
    """
    try:
        fd.seek(offset)
    except (AttributeError, io.UnsupportedOperation):
        # This file handle probably has no seek()
        emulate_seek(fd, offset, chunk)
[ "def", "force_seek", "(", "fd", ",", "offset", ",", "chunk", "=", "CHUNK", ")", ":", "try", ":", "fd", ".", "seek", "(", "offset", ")", "except", "(", "AttributeError", ",", "io", ".", "UnsupportedOperation", ")", ":", "# This file handle probably has no seek()", "emulate_seek", "(", "fd", ",", "offset", ",", "chunk", ")" ]
39.666667
[ 0.025, 0.03333333333333333, 0, 0.038461538461538464, 0.039473684210526314, 0.047619047619047616, 0, 0.039473684210526314, 0.13043478260869565, 0.2857142857142857, 0.25, 0.08695652173913043, 0.03773584905660377, 0.04081632653061224, 0.05128205128205128 ]
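The emulate_seek() fallback itself is not part of this record; the idea is roughly this sketch (read and discard up to offset bytes in chunk-sized pieces):
def emulate_seek_sketch(fd, offset, chunk):
    read = 0
    while read < offset:
        data = fd.read(min(chunk, offset - read))
        if not data:  # stream ended before reaching the offset
            break
        read += len(data)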
def filter_cid(cls, ops, kwargs):
    """Remove occurrences of the :func:`.circuit_identity` ``cid(n)``
    for any ``n``.

    Cf. :func:`filter_neutral`
    """
    from qnet.algebra.core.circuit_algebra import CircuitZero, circuit_identity
    if len(ops) == 0:
        return CircuitZero
    fops = [op for op in ops if op != circuit_identity(op.cdim)]
    if len(fops) > 1:
        return fops, kwargs
    elif len(fops) == 1:
        # the remaining operand is the single non-trivial one
        return fops[0]
    else:
        # the original list of operands consists only of neutral elements
        return ops[0]
[ "def", "filter_cid", "(", "cls", ",", "ops", ",", "kwargs", ")", ":", "from", "qnet", ".", "algebra", ".", "core", ".", "circuit_algebra", "import", "CircuitZero", ",", "circuit_identity", "if", "len", "(", "ops", ")", "==", "0", ":", "return", "CircuitZero", "fops", "=", "[", "op", "for", "op", "in", "ops", "if", "op", "!=", "circuit_identity", "(", "op", ".", "cdim", ")", "]", "if", "len", "(", "fops", ")", ">", "1", ":", "return", "fops", ",", "kwargs", "elif", "len", "(", "fops", ")", "==", "1", ":", "# the remaining operand is the single non-trivial one", "return", "fops", "[", "0", "]", "else", ":", "# the original list of operands consists only of neutral elements", "return", "ops", "[", "0", "]" ]
37.625
[ 0.030303030303030304, 0.025974025974025976, 0.16216216216216217, 0.2857142857142857, 0.02531645569620253, 0.09523809523809523, 0.07692307692307693, 0.03125, 0.09523809523809523, 0.07407407407407407, 0.08333333333333333, 0.03278688524590164, 0.09090909090909091, 0.2222222222222222, 0.0273972602739726, 0.09523809523809523 ]
def round_polygon_coords(p, precision):
    """
    Round the coordinates of a shapely Polygon to some decimal precision.

    Parameters
    ----------
    p : shapely Polygon
        the polygon to round the coordinates of
    precision : int
        decimal precision to round coordinates to

    Returns
    -------
    new_poly : shapely Polygon
        the polygon with rounded coordinates
    """

    # round the coordinates of the Polygon exterior
    new_exterior = [[round(x, precision) for x in c] for c in p.exterior.coords]

    # round the coordinates of the (possibly multiple, possibly none) Polygon interior(s)
    new_interiors = []
    for interior in p.interiors:
        new_interiors.append([[round(x, precision) for x in c] for c in interior.coords])

    # construct a new Polygon with the rounded coordinates
    # buffer by zero to clean self-touching or self-crossing polygons
    new_poly = Polygon(shell=new_exterior, holes=new_interiors).buffer(0)
    return new_poly
[ "def", "round_polygon_coords", "(", "p", ",", "precision", ")", ":", "# round the coordinates of the Polygon exterior", "new_exterior", "=", "[", "[", "round", "(", "x", ",", "precision", ")", "for", "x", "in", "c", "]", "for", "c", "in", "p", ".", "exterior", ".", "coords", "]", "# round the coordinates of the (possibly multiple, possibly none) Polygon interior(s)", "new_interiors", "=", "[", "]", "for", "interior", "in", "p", ".", "interiors", ":", "new_interiors", ".", "append", "(", "[", "[", "round", "(", "x", ",", "precision", ")", "for", "x", "in", "c", "]", "for", "c", "in", "interior", ".", "coords", "]", ")", "# construct a new Polygon with the rounded coordinates", "# buffer by zero to clean self-touching or self-crossing polygons", "new_poly", "=", "Polygon", "(", "shell", "=", "new_exterior", ",", "holes", "=", "new_interiors", ")", ".", "buffer", "(", "0", ")", "return", "new_poly" ]
33.724138
[ 0.02564102564102564, 0.2857142857142857, 0.0273972602739726, 0, 0.14285714285714285, 0.14285714285714285, 0.13043478260869565, 0.0425531914893617, 0.15789473684210525, 0.04081632653061224, 0, 0.18181818181818182, 0.18181818181818182, 0.1, 0.045454545454545456, 0.2857142857142857, 0.5, 0.0392156862745098, 0.0375, 0, 0.033707865168539325, 0.09090909090909091, 0.0625, 0.033707865168539325, 0.5, 0.034482758620689655, 0.028985507246376812, 0.0273972602739726, 0.10526315789473684 ]
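Runnable example (requires shapely): round a slightly noisy unit square to two decimals and clean the result with buffer(0):
from shapely.geometry import Polygon
p = Polygon([(0, 0), (1.00000001, 0), (1.00000001, 1.00000001), (0, 1)])
rounded = round_polygon_coords(p, 2)
print(rounded.wkt)  # exterior coordinates are now rounded to 2 decimals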
def read_logfile(log_file):
    """
    Reads a latools analysis.log file, and returns dicts of arguments.

    Parameters
    ----------
    log_file : str
        Path to an analysis.log file produced by latools.

    Returns
    -------
    runargs, paths : tuple
        Two dictionaries. runargs contains all the arguments required to
        run each step of analysis in the form
        (function_name, {'args': (), 'kwargs': {}}). paths contains the
        locations of the data directory and the SRM database used for
        analysis.
    """
    dirname = os.path.dirname(log_file) + '/'

    with open(log_file, 'r') as f:
        rlog = f.readlines()

    hashind = [i for i, n in enumerate(rlog) if '#' in n]

    pathread = re.compile('(.*) :: (.*)\n')
    paths = (pathread.match(l).groups() for l in rlog[hashind[0] + 1:hashind[-1]] if pathread.match(l))
    paths = {k: os.path.join(dirname, v) for k, v in paths}
    # paths = {k: os.path.abspath(v) for k, v in paths}

    logread = re.compile('([a-z_]+) :: args=(\(.*\)) kwargs=(\{.*\})')
    runargs = []
    for line in rlog[hashind[1] + 1:]:
        fname, args, kwargs = (logread.match(line).groups())
        runargs.append((fname, {'args': eval(args), 'kwargs': eval(kwargs)}))

        if fname == '__init__':
            runargs[-1][-1]['kwargs']['config'] = 'REPRODUCE'
            runargs[-1][-1]['kwargs']['dataformat'] = None
            runargs[-1][-1]['kwargs']['data_folder'] = paths['data_folder']

            if 'srm_table' in paths:
                runargs[-1][-1]['kwargs']['srm_file'] = paths['srm_table']

    return runargs, paths
[ "def", "read_logfile", "(", "log_file", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "log_file", ")", "+", "'/'", "with", "open", "(", "log_file", ",", "'r'", ")", "as", "f", ":", "rlog", "=", "f", ".", "readlines", "(", ")", "hashind", "=", "[", "i", "for", "i", ",", "n", "in", "enumerate", "(", "rlog", ")", "if", "'#'", "in", "n", "]", "pathread", "=", "re", ".", "compile", "(", "'(.*) :: (.*)\\n'", ")", "paths", "=", "(", "pathread", ".", "match", "(", "l", ")", ".", "groups", "(", ")", "for", "l", "in", "rlog", "[", "hashind", "[", "0", "]", "+", "1", ":", "hashind", "[", "-", "1", "]", "]", "if", "pathread", ".", "match", "(", "l", ")", ")", "paths", "=", "{", "k", ":", "os", ".", "path", ".", "join", "(", "dirname", ",", "v", ")", "for", "k", ",", "v", "in", "paths", "}", "# paths = {k: os.path.abspath(v) for k, v in paths}", "logread", "=", "re", ".", "compile", "(", "'([a-z_]+) :: args=(\\(.*\\)) kwargs=(\\{.*\\})'", ")", "runargs", "=", "[", "]", "for", "line", "in", "rlog", "[", "hashind", "[", "1", "]", "+", "1", ":", "]", ":", "fname", ",", "args", ",", "kwargs", "=", "(", "logread", ".", "match", "(", "line", ")", ".", "groups", "(", ")", ")", "runargs", ".", "append", "(", "(", "fname", ",", "{", "'args'", ":", "eval", "(", "args", ")", ",", "'kwargs'", ":", "eval", "(", "kwargs", ")", "}", ")", ")", "if", "fname", "==", "'__init__'", ":", "runargs", "[", "-", "1", "]", "[", "-", "1", "]", "[", "'kwargs'", "]", "[", "'config'", "]", "=", "'REPRODUCE'", "runargs", "[", "-", "1", "]", "[", "-", "1", "]", "[", "'kwargs'", "]", "[", "'dataformat'", "]", "=", "None", "runargs", "[", "-", "1", "]", "[", "-", "1", "]", "[", "'kwargs'", "]", "[", "'data_folder'", "]", "=", "paths", "[", "'data_folder'", "]", "if", "'srm_table'", "in", "paths", ":", "runargs", "[", "-", "1", "]", "[", "-", "1", "]", "[", "'kwargs'", "]", "[", "'srm_file'", "]", "=", "paths", "[", "'srm_table'", "]", "return", "runargs", ",", "paths" ]
37.714286
[ 0.037037037037037035, 0.2857142857142857, 0.028169014084507043, 0, 0.14285714285714285, 0.14285714285714285, 0.16666666666666666, 0.03508771929824561, 0.5, 0.18181818181818182, 0.18181818181818182, 0.11538461538461539, 0.03488372093023256, 0.04395604395604396, 0.03614457831325301, 0.2857142857142857, 0.044444444444444446, 0.5, 0.058823529411764705, 0.07142857142857142, 0, 0.03508771929824561, 0, 0.046511627906976744, 0.038834951456310676, 0.03389830508474576, 0.03636363636363636, 0, 0.08571428571428572, 0.125, 0.05263157894736842, 0.03333333333333333, 0.05194805194805195, 0.25, 0.06451612903225806, 0.03278688524590164, 0.034482758620689655, 0.02666666666666667, 0.05555555555555555, 0.02702702702702703, 0, 0.08 ]
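Usage sketch (the log path is a placeholder): parse a recorded session and inspect the steps before replaying them:
runargs, paths = read_logfile('analysis.log')
for fname, arg_dict in runargs:
    print(fname, arg_dict['args'], arg_dict['kwargs'])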
def clean_queues(self):
    # pylint: disable=too-many-locals
    """Reduces internal list size to max allowed

    * checks and broks : 5 * length of hosts + services
    * actions : 5 * length of hosts + services + contacts

    :return: None
    """
    # If we set the interval at 0, we bail out
    if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0:
        logger.debug("No queues cleaning...")
        return

    max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services))
    max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services))
    max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) + len(self.services))

    # For checks, it's not very simple:
    # For checks, they may be referred to their host/service
    # We do not just del them in the check list, but also in their service/host
    # We want id of lower than max_id - 2*max_checks
    self.nb_checks_dropped = 0
    if max_checks and len(self.checks) > max_checks:
        # keys does not ensure sorted keys. Max is slow but we have no other way.
        to_del_checks = [c for c in list(self.checks.values())]
        to_del_checks.sort(key=lambda x: x.creation_time)
        to_del_checks = to_del_checks[:-max_checks]
        self.nb_checks_dropped = len(to_del_checks)
        if to_del_checks:
            logger.warning("I have to drop some checks (%d)..., sorry :(",
                           self.nb_checks_dropped)

        for chk in to_del_checks:
            c_id = chk.uuid
            items = getattr(self, chk.ref_type + 's')
            elt = items[chk.ref]
            # First remove the link in host/service
            elt.remove_in_progress_check(chk)
            # Then in dependent checks (I depend on, or check
            # depend on me)
            for dependent_checks in chk.depend_on_me:
                dependent_checks.depend_on.remove(chk.uuid)
            for c_temp in chk.depend_on:
                c_temp.depend_on_me.remove(chk)
            del self.checks[c_id]  # Final Bye bye ...

    # For broks and actions, it's more simple
    # or broks, manage global but also all brokers
    self.nb_broks_dropped = 0
    for broker_link in list(self.my_daemon.brokers.values()):
        if max_broks and len(broker_link.broks) > max_broks:
            logger.warning("I have to drop some broks (%d > %d) for the broker %s "
                           "..., sorry :(", len(broker_link.broks), max_broks, broker_link)

            kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time)
            # Delete the oldest broks to keep the max_broks most recent...
            # todo: is it a good choice !
            broker_link.broks = kept_broks[0:max_broks]

    self.nb_actions_dropped = 0
    if max_actions and len(self.actions) > max_actions:
        logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(",
                       len(self.actions), max_actions)
        to_del_actions = [c for c in list(self.actions.values())]
        to_del_actions.sort(key=lambda x: x.creation_time)
        to_del_actions = to_del_actions[:-max_actions]
        self.nb_actions_dropped = len(to_del_actions)
        for act in to_del_actions:
            if act.is_a == 'notification':
                self.find_item_by_id(act.ref).remove_in_progress_notification(act)
            del self.actions[act.uuid]
[ "def", "clean_queues", "(", "self", ")", ":", "# pylint: disable=too-many-locals", "# If we set the interval at 0, we bail out", "if", "getattr", "(", "self", ".", "pushed_conf", ",", "'tick_clean_queues'", ",", "0", ")", "==", "0", ":", "logger", ".", "debug", "(", "\"No queues cleaning...\"", ")", "return", "max_checks", "=", "MULTIPLIER_MAX_CHECKS", "*", "(", "len", "(", "self", ".", "hosts", ")", "+", "len", "(", "self", ".", "services", ")", ")", "max_broks", "=", "MULTIPLIER_MAX_BROKS", "*", "(", "len", "(", "self", ".", "hosts", ")", "+", "len", "(", "self", ".", "services", ")", ")", "max_actions", "=", "MULTIPLIER_MAX_ACTIONS", "*", "len", "(", "self", ".", "contacts", ")", "*", "(", "len", "(", "self", ".", "hosts", ")", "+", "len", "(", "self", ".", "services", ")", ")", "# For checks, it's not very simple:", "# For checks, they may be referred to their host/service", "# We do not just del them in the check list, but also in their service/host", "# We want id of lower than max_id - 2*max_checks", "self", ".", "nb_checks_dropped", "=", "0", "if", "max_checks", "and", "len", "(", "self", ".", "checks", ")", ">", "max_checks", ":", "# keys does not ensure sorted keys. Max is slow but we have no other way.", "to_del_checks", "=", "[", "c", "for", "c", "in", "list", "(", "self", ".", "checks", ".", "values", "(", ")", ")", "]", "to_del_checks", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "creation_time", ")", "to_del_checks", "=", "to_del_checks", "[", ":", "-", "max_checks", "]", "self", ".", "nb_checks_dropped", "=", "len", "(", "to_del_checks", ")", "if", "to_del_checks", ":", "logger", ".", "warning", "(", "\"I have to drop some checks (%d)..., sorry :(\"", ",", "self", ".", "nb_checks_dropped", ")", "for", "chk", "in", "to_del_checks", ":", "c_id", "=", "chk", ".", "uuid", "items", "=", "getattr", "(", "self", ",", "chk", ".", "ref_type", "+", "'s'", ")", "elt", "=", "items", "[", "chk", ".", "ref", "]", "# First remove the link in host/service", "elt", ".", "remove_in_progress_check", "(", "chk", ")", "# Then in dependent checks (I depend on, or check", "# depend on me)", "for", "dependent_checks", "in", "chk", ".", "depend_on_me", ":", "dependent_checks", ".", "depend_on", ".", "remove", "(", "chk", ".", "uuid", ")", "for", "c_temp", "in", "chk", ".", "depend_on", ":", "c_temp", ".", "depend_on_me", ".", "remove", "(", "chk", ")", "del", "self", ".", "checks", "[", "c_id", "]", "# Final Bye bye ...", "# For broks and actions, it's more simple", "# or broks, manage global but also all brokers", "self", ".", "nb_broks_dropped", "=", "0", "for", "broker_link", "in", "list", "(", "self", ".", "my_daemon", ".", "brokers", ".", "values", "(", ")", ")", ":", "if", "max_broks", "and", "len", "(", "broker_link", ".", "broks", ")", ">", "max_broks", ":", "logger", ".", "warning", "(", "\"I have to drop some broks (%d > %d) for the broker %s \"", "\"..., sorry :(\"", ",", "len", "(", "broker_link", ".", "broks", ")", ",", "max_broks", ",", "broker_link", ")", "kept_broks", "=", "sorted", "(", "broker_link", ".", "broks", ",", "key", "=", "lambda", "x", ":", "x", ".", "creation_time", ")", "# Delete the oldest broks to keep the max_broks most recent...", "# todo: is it a good choice !", "broker_link", ".", "broks", "=", "kept_broks", "[", "0", ":", "max_broks", "]", "self", ".", "nb_actions_dropped", "=", "0", "if", "max_actions", "and", "len", "(", "self", ".", "actions", ")", ">", "max_actions", ":", "logger", ".", "warning", "(", "\"I have to del some actions 
(currently: %d, max: %d)..., sorry :(\"", ",", "len", "(", "self", ".", "actions", ")", ",", "max_actions", ")", "to_del_actions", "=", "[", "c", "for", "c", "in", "list", "(", "self", ".", "actions", ".", "values", "(", ")", ")", "]", "to_del_actions", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "creation_time", ")", "to_del_actions", "=", "to_del_actions", "[", ":", "-", "max_actions", "]", "self", ".", "nb_actions_dropped", "=", "len", "(", "to_del_actions", ")", "for", "act", "in", "to_del_actions", ":", "if", "act", ".", "is_a", "==", "'notification'", ":", "self", ".", "find_item_by_id", "(", "act", ".", "ref", ")", ".", "remove_in_progress_notification", "(", "act", ")", "del", "self", ".", "actions", "[", "act", ".", "uuid", "]" ]
50.847222
[ 0.043478260869565216, 0.04878048780487805, 0.038461538461538464, 0, 0.05084745762711865, 0.04918032786885246, 0, 0.14285714285714285, 0.18181818181818182, 0.04, 0.030303030303030304, 0.04081632653061224, 0.1111111111111111, 0, 0.03614457831325301, 0.037037037037037035, 0.046511627906976744, 0.056818181818181816, 0, 0.046511627906976744, 0.03125, 0.03614457831325301, 0.03571428571428571, 0.058823529411764705, 0.03571428571428571, 0.03529411764705882, 0.029850746268656716, 0.03278688524590164, 0.03636363636363636, 0.03636363636363636, 0.06896551724137931, 0.038461538461538464, 0.07407407407407407, 0.05405405405405406, 0.06451612903225806, 0.03508771929824561, 0.05555555555555555, 0.03636363636363636, 0.04081632653061224, 0.03076923076923077, 0.06451612903225806, 0.03508771929824561, 0.031746031746031744, 0.045454545454545456, 0.0392156862745098, 0.034482758620689655, 0, 0.04081632653061224, 0.037037037037037035, 0.06060606060606061, 0.03076923076923077, 0.03125, 0.04597701149425287, 0.05263157894736842, 0, 0.03529411764705882, 0.02564102564102564, 0.044444444444444446, 0.03389830508474576, 0, 0.05714285714285714, 0.03389830508474576, 0.0425531914893617, 0.06896551724137931, 0.028985507246376812, 0.03225806451612903, 0.034482758620689655, 0.03508771929824561, 0.05263157894736842, 0.043478260869565216, 0.03488372093023256, 0.047619047619047616 ]
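A minimal sketch of the age-based pruning pattern used by clean_queues above; Item and the queue contents are hypothetical stand-ins, not Alignak's real check objects.

import time
from collections import namedtuple

Item = namedtuple("Item", "uuid creation_time")

def prune_oldest(queue, max_items):
    # Drop the oldest entries so that at most max_items remain.
    if max_items and len(queue) > max_items:
        kept = sorted(queue.values(), key=lambda x: x.creation_time)
        for item in kept[:-max_items]:  # everything but the newest max_items
            del queue[item.uuid]

queue = {i: Item(i, time.time() + i) for i in range(10)}
prune_oldest(queue, 5)
assert len(queue) == 5 and min(queue) == 5  # only the 5 newest survive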
def text_to_data(self, text, elt, ps):
    '''Convert text into typecode-specific data.
    '''
    if self.pyclass is not None:
        v = self.pyclass(text)
    else:
        try:
            v = int(text)
        except ValueError:
            try:
                # Python 2 fallback for integers beyond the plain int range
                v = long(text)
            except ValueError:
                raise EvaluateException('Unparseable integer', ps.Backtrace(elt))
    return v
[ "def", "text_to_data", "(", "self", ",", "text", ",", "elt", ",", "ps", ")", ":", "if", "self", ".", "pyclass", "is", "not", "None", ":", "v", "=", "self", ".", "pyclass", "(", "text", ")", "else", ":", "try", ":", "v", "=", "int", "(", "text", ")", "except", ":", "try", ":", "v", "=", "long", "(", "text", ")", "except", ":", "raise", "EvaluateException", "(", "'Unparseable integer'", ",", "ps", ".", "Backtrace", "(", "elt", ")", ")", "return", "v" ]
30.066667
[ 0.02631578947368421, 0.038461538461538464, 0.18181818181818182, 0.05555555555555555, 0.08571428571428572, 0.15384615384615385, 0.125, 0.06896551724137931, 0.15789473684210525, 0.1, 0.058823529411764705, 0.13043478260869565, 0.05970149253731343, 0.07142857142857142, 0.125 ]
def Logs(loggername, echo=True, debug=False, chatty=False, loglevel=logging.INFO, logfile=None, logpath=None, fileHandler=None): """Initialize logging """ log = logging.getLogger(loggername) if fileHandler is None: if logfile is None: logFilename = _ourName else: logFilename = logfile if '.log' not in logFilename: logFilename = '%s.log' % logFilename if logpath is not None: logFilename = os.path.join(logpath, logFilename) _handler = logging.FileHandler(logFilename) _formatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s') _handler.setFormatter(_formatter) log.addHandler(_handler) # logging.fileHandler = _handler else: log.addHandler(fileHandler) # logging.fileHandler = fileHandler if echo: echoHandler = logging.StreamHandler() if chatty: echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s') else: echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s') echoHandler.setFormatter(echoFormatter) log.addHandler(echoHandler) if debug: log.setLevel(logging.DEBUG) else: log.setLevel(loglevel) atexit.register(shutdownLogging)
[ "def", "Logs", "(", "loggername", ",", "echo", "=", "True", ",", "debug", "=", "False", ",", "chatty", "=", "False", ",", "loglevel", "=", "logging", ".", "INFO", ",", "logfile", "=", "None", ",", "logpath", "=", "None", ",", "fileHandler", "=", "None", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "loggername", ")", "if", "fileHandler", "is", "None", ":", "if", "logfile", "is", "None", ":", "logFilename", "=", "_ourName", "else", ":", "logFilename", "=", "logfile", "if", "'.log'", "not", "in", "logFilename", ":", "logFilename", "=", "'%s.log'", "%", "logFilename", "if", "logpath", "is", "not", "None", ":", "logFilename", "=", "os", ".", "path", ".", "join", "(", "logpath", ",", "logFilename", ")", "_handler", "=", "logging", ".", "FileHandler", "(", "logFilename", ")", "_formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(message)s'", ")", "_handler", ".", "setFormatter", "(", "_formatter", ")", "log", ".", "addHandler", "(", "_handler", ")", "# logging.fileHandler = _handler", "else", ":", "log", ".", "addHandler", "(", "fileHandler", ")", "# logging.fileHandler = fileHandler", "if", "echo", ":", "echoHandler", "=", "logging", ".", "StreamHandler", "(", ")", "if", "chatty", ":", "echoFormatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s'", ")", "else", ":", "echoFormatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(message)s'", ")", "echoHandler", ".", "setFormatter", "(", "echoFormatter", ")", "log", ".", "addHandler", "(", "echoHandler", ")", "if", "debug", ":", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "log", ".", "setLevel", "(", "loglevel", ")", "atexit", ".", "register", "(", "shutdownLogging", ")" ]
31.952381
[ 0.015625, 0.08, 0.2857142857142857, 0.05128205128205128, 0, 0.07407407407407407, 0.07407407407407407, 0.058823529411764705, 0.15384615384615385, 0.06060606060606061, 0, 0.05405405405405406, 0.041666666666666664, 0, 0.06451612903225806, 0.03333333333333333, 0, 0.05660377358490566, 0.037037037037037035, 0, 0.04878048780487805, 0.0625, 0.05, 0.2222222222222222, 0.05714285714285714, 0.046511627906976744, 0, 0.16666666666666666, 0.044444444444444446, 0.1111111111111111, 0.025423728813559324, 0.15384615384615385, 0.03409090909090909, 0.0425531914893617, 0.05714285714285714, 0, 0.15384615384615385, 0.05714285714285714, 0.2222222222222222, 0.06666666666666667, 0, 0.05555555555555555 ]
def writecooked(self, text): """Put data directly into the output queue""" # Ensure this is the only thread writing self.OQUEUELOCK.acquire() TelnetHandlerBase.writecooked(self, text) self.OQUEUELOCK.release()
[ "def", "writecooked", "(", "self", ",", "text", ")", ":", "# Ensure this is the only thread writing", "self", ".", "OQUEUELOCK", ".", "acquire", "(", ")", "TelnetHandlerBase", ".", "writecooked", "(", "self", ",", "text", ")", "self", ".", "OQUEUELOCK", ".", "release", "(", ")" ]
40.666667
[ 0.03571428571428571, 0.03773584905660377, 0.041666666666666664, 0.06060606060606061, 0.04081632653061224, 0.06060606060606061 ]
def pop_choice(params: Dict[str, Any], key: str, choices: List[Any], default_to_first_choice: bool = False, history: str = "?.") -> Any: """ Performs the same function as :func:`Params.pop_choice`, but is required in order to deal with places that the Params object is not welcome, such as inside Keras layers. See the docstring of that method for more detail on how this function works. This method adds a ``history`` parameter, in the off-chance that you know it, so that we can reproduce :func:`Params.pop_choice` exactly. We default to using "?." if you don't know the history, so you'll have to fix that in the log if you want to actually recover the logged parameters. """ value = Params(params, history).pop_choice(key, choices, default_to_first_choice) return value
[ "def", "pop_choice", "(", "params", ":", "Dict", "[", "str", ",", "Any", "]", ",", "key", ":", "str", ",", "choices", ":", "List", "[", "Any", "]", ",", "default_to_first_choice", ":", "bool", "=", "False", ",", "history", ":", "str", "=", "\"?.\"", ")", "->", "Any", ":", "value", "=", "Params", "(", "params", ",", "history", ")", ".", "pop_choice", "(", "key", ",", "choices", ",", "default_to_first_choice", ")", "return", "value" ]
50.823529
[ 0.05263157894736842, 0.125, 0.08823529411764706, 0.05660377358490566, 0.09302325581395349, 0.2857142857142857, 0.07142857142857142, 0.030927835051546393, 0.03225806451612903, 0, 0.041666666666666664, 0.07291666666666667, 0.03225806451612903, 0.13333333333333333, 0.2857142857142857, 0.03529411764705882, 0.125 ]
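A hedged usage sketch for pop_choice above, assuming AllenNLP-style Params semantics (the key is popped from the dict and must be one of the given choices unless default_to_first_choice is set); the parameter names here are illustrative only.

params = {"optimizer": "adam", "lr": 0.001}
choice = pop_choice(params, key="optimizer", choices=["adam", "sgd"],
                    history="trainer.")
# choice == "adam"; "optimizer" has been popped out of params

choice = pop_choice({}, key="optimizer", choices=["adam", "sgd"],
                    default_to_first_choice=True)
# choice == "adam" (falls back to choices[0] when the key is absent)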
def _get_file_creation_time(file_path): """Returns the creation time of the file at the specified file path in Microsoft FILETIME structure format (https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284.aspx), formatted as a 8-byte unsigned integer bytearray. """ ctime = getctime(file_path) if ctime < -11644473600 or ctime >= 253402300800: raise FileTimeOutOfRangeException(ctime) creation_time_datetime = datetime.utcfromtimestamp(ctime) creation_time_epoch_offset = creation_time_datetime - datetime(1601, 1, 1) creation_time_secs_from_epoch = _convert_timedelta_to_seconds(creation_time_epoch_offset) creation_time_filetime = int(creation_time_secs_from_epoch * (10 ** 7)) file_creation_time = bytearray(8) pack_into(b"Q", file_creation_time, 0, creation_time_filetime) return file_creation_time
[ "def", "_get_file_creation_time", "(", "file_path", ")", ":", "ctime", "=", "getctime", "(", "file_path", ")", "if", "ctime", "<", "-", "11644473600", "or", "ctime", ">=", "253402300800", ":", "raise", "FileTimeOutOfRangeException", "(", "ctime", ")", "creation_time_datetime", "=", "datetime", ".", "utcfromtimestamp", "(", "ctime", ")", "creation_time_epoch_offset", "=", "creation_time_datetime", "-", "datetime", "(", "1601", ",", "1", ",", "1", ")", "creation_time_secs_from_epoch", "=", "_convert_timedelta_to_seconds", "(", "creation_time_epoch_offset", ")", "creation_time_filetime", "=", "int", "(", "creation_time_secs_from_epoch", "*", "(", "10", "**", "7", ")", ")", "file_creation_time", "=", "bytearray", "(", "8", ")", "pack_into", "(", "b\"Q\"", ",", "file_creation_time", ",", "0", ",", "creation_time_filetime", ")", "return", "file_creation_time" ]
37.521739
[ 0.02564102564102564, 0.03225806451612903, 0.061855670103092786, 0.05357142857142857, 0.2857142857142857, 0, 0.06451612903225806, 0, 0.03773584905660377, 0.041666666666666664, 0, 0.03278688524590164, 0, 0.02564102564102564, 0, 0.03225806451612903, 0, 0.02666666666666667, 0, 0.05405405405405406, 0.030303030303030304, 0, 0.06896551724137931 ]
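The FILETIME arithmetic above can be checked by hand: FILETIME counts 100-nanosecond ticks since 1601-01-01, and the 1601-to-1970 epoch offset is exactly 11,644,473,600 seconds. A self-contained check:

from datetime import datetime
from struct import pack_into

unix_ts = 0  # 1970-01-01T00:00:00Z
dt = datetime.utcfromtimestamp(unix_ts)
secs_since_1601 = (dt - datetime(1601, 1, 1)).total_seconds()
filetime = int(secs_since_1601 * 10 ** 7)  # 100-ns ticks
assert filetime == 116444736000000000      # 11644473600 * 10**7

buf = bytearray(8)
pack_into(b"Q", buf, 0, filetime)  # same unsigned 64-bit packing as above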
def __check_hash(self, message):
    """Return True if the message's hash matches the hash computed
    from its payload, token and sequence number; False otherwise.

    message = dict
    """
    return message[W_HASH] == self.__make_hash(message[W_MESSAGE], self.__token, message[W_SEQ])
[ "def", "__check_hash", "(", "self", ",", "message", ")", ":", "return", "message", "[", "W_HASH", "]", "==", "self", ".", "__make_hash", "(", "message", "[", "W_MESSAGE", "]", ",", "self", ".", "__token", ",", "message", "[", "W_SEQ", "]", ")" ]
41.8
[ 0.03125, 0.045454545454545456, 0.09090909090909091, 0.18181818181818182, 0.03 ]
def set_mode(self, mode, custom_mode = 0, custom_sub_mode = 0): '''set arbitrary flight mode''' mav_autopilot = self.field('HEARTBEAT', 'autopilot', None) if mav_autopilot == mavlink.MAV_AUTOPILOT_PX4: self.set_mode_px4(mode, custom_mode, custom_sub_mode) else: self.set_mode_apm(mode)
[ "def", "set_mode", "(", "self", ",", "mode", ",", "custom_mode", "=", "0", ",", "custom_sub_mode", "=", "0", ")", ":", "mav_autopilot", "=", "self", ".", "field", "(", "'HEARTBEAT'", ",", "'autopilot'", ",", "None", ")", "if", "mav_autopilot", "==", "mavlink", ".", "MAV_AUTOPILOT_PX4", ":", "self", ".", "set_mode_px4", "(", "mode", ",", "custom_mode", ",", "custom_sub_mode", ")", "else", ":", "self", ".", "set_mode_apm", "(", "mode", ")" ]
47.857143
[ 0.07936507936507936, 0.05128205128205128, 0.030303030303030304, 0.037037037037037035, 0.03076923076923077, 0.15384615384615385, 0.05714285714285714 ]
def search_document_cache_key(self): """Key used for storing search docs in local cache.""" return "elasticsearch_django:{}.{}.{}".format( self._meta.app_label, self._meta.model_name, self.pk )
[ "def", "search_document_cache_key", "(", "self", ")", ":", "return", "\"elasticsearch_django:{}.{}.{}\"", ".", "format", "(", "self", ".", "_meta", ".", "app_label", ",", "self", ".", "_meta", ".", "model_name", ",", "self", ".", "pk", ")" ]
45
[ 0.027777777777777776, 0.03225806451612903, 0.05555555555555555, 0.03125, 0.3333333333333333 ]
def save(self, filename, wildcard='*', verbose=False): '''save parameters to a file''' f = open(filename, mode='w') k = list(self.keys()) k.sort() count = 0 for p in k: if p and fnmatch.fnmatch(str(p).upper(), wildcard.upper()): f.write("%-16.16s %f\n" % (p, self.__getitem__(p))) count += 1 f.close() if verbose: print("Saved %u parameters to %s" % (count, filename))
[ "def", "save", "(", "self", ",", "filename", ",", "wildcard", "=", "'*'", ",", "verbose", "=", "False", ")", ":", "f", "=", "open", "(", "filename", ",", "mode", "=", "'w'", ")", "k", "=", "list", "(", "self", ".", "keys", "(", ")", ")", "k", ".", "sort", "(", ")", "count", "=", "0", "for", "p", "in", "k", ":", "if", "p", "and", "fnmatch", ".", "fnmatch", "(", "str", "(", "p", ")", ".", "upper", "(", ")", ",", "wildcard", ".", "upper", "(", ")", ")", ":", "f", ".", "write", "(", "\"%-16.16s %f\\n\"", "%", "(", "p", ",", "self", ".", "__getitem__", "(", "p", ")", ")", ")", "count", "+=", "1", "f", ".", "close", "(", ")", "if", "verbose", ":", "print", "(", "\"Saved %u parameters to %s\"", "%", "(", "count", ",", "filename", ")", ")" ]
36.615385
[ 0.018518518518518517, 0.05128205128205128, 0.05555555555555555, 0.06896551724137931, 0.125, 0.11764705882352941, 0.10526315789473684, 0.028169014084507043, 0.029850746268656716, 0.07692307692307693, 0.11764705882352941, 0.10526315789473684, 0.030303030303030304 ]
def _initialize(self, runtime): """Common initializer for OsidManager and OsidProxyManager""" if runtime is None: raise NullArgument() if self._my_runtime is not None: raise IllegalState('this manager has already been initialized.') self._my_runtime = runtime config = runtime.get_configuration() data_store_path_param_id = Id('parameter:dataStorePath@filesystem') data_store_path = config.get_value_by_parameter(data_store_path_param_id).get_string_value() # for deployments, also see if a "secondaryDataStorePath" exists secondary_data_store_path_param_id = Id('parameter:secondaryDataStorePath@filesystem') try: secondary_data_store_path = config.get_value_by_parameter( secondary_data_store_path_param_id).get_string_value() self._config_map['secondary_data_store_path'] = secondary_data_store_path except (AttributeError, KeyError, NotFound): pass # for deployments, also see if a "urlHostname" exists to prepend to the # assetContent.get_url() method url_hostname_param_id = Id('parameter:urlHostname@filesystem') try: url_hostname = config.get_value_by_parameter( url_hostname_param_id).get_string_value() self._config_map['url_hostname'] = url_hostname except (AttributeError, KeyError, NotFound): pass self._config_map['data_store_path'] = data_store_path # for convenience, also see if a "dataStoreFullPath" exists data_store_full_path_param_id = Id('parameter:dataStoreFullPath@filesystem') try: data_store_full_path = config.get_value_by_parameter( data_store_full_path_param_id).get_string_value() self._config_map['data_store_full_path'] = data_store_full_path except (AttributeError, KeyError, NotFound): pass
[ "def", "_initialize", "(", "self", ",", "runtime", ")", ":", "if", "runtime", "is", "None", ":", "raise", "NullArgument", "(", ")", "if", "self", ".", "_my_runtime", "is", "not", "None", ":", "raise", "IllegalState", "(", "'this manager has already been initialized.'", ")", "self", ".", "_my_runtime", "=", "runtime", "config", "=", "runtime", ".", "get_configuration", "(", ")", "data_store_path_param_id", "=", "Id", "(", "'parameter:dataStorePath@filesystem'", ")", "data_store_path", "=", "config", ".", "get_value_by_parameter", "(", "data_store_path_param_id", ")", ".", "get_string_value", "(", ")", "# for deployments, also see if a \"secondaryDataStorePath\" exists", "secondary_data_store_path_param_id", "=", "Id", "(", "'parameter:secondaryDataStorePath@filesystem'", ")", "try", ":", "secondary_data_store_path", "=", "config", ".", "get_value_by_parameter", "(", "secondary_data_store_path_param_id", ")", ".", "get_string_value", "(", ")", "self", ".", "_config_map", "[", "'secondary_data_store_path'", "]", "=", "secondary_data_store_path", "except", "(", "AttributeError", ",", "KeyError", ",", "NotFound", ")", ":", "pass", "# for deployments, also see if a \"urlHostname\" exists to prepend to the", "# assetContent.get_url() method", "url_hostname_param_id", "=", "Id", "(", "'parameter:urlHostname@filesystem'", ")", "try", ":", "url_hostname", "=", "config", ".", "get_value_by_parameter", "(", "url_hostname_param_id", ")", ".", "get_string_value", "(", ")", "self", ".", "_config_map", "[", "'url_hostname'", "]", "=", "url_hostname", "except", "(", "AttributeError", ",", "KeyError", ",", "NotFound", ")", ":", "pass", "self", ".", "_config_map", "[", "'data_store_path'", "]", "=", "data_store_path", "# for convenience, also see if a \"dataStoreFullPath\" exists", "data_store_full_path_param_id", "=", "Id", "(", "'parameter:dataStoreFullPath@filesystem'", ")", "try", ":", "data_store_full_path", "=", "config", ".", "get_value_by_parameter", "(", "data_store_full_path_param_id", ")", ".", "get_string_value", "(", ")", "self", ".", "_config_map", "[", "'data_store_full_path'", "]", "=", "data_store_full_path", "except", "(", "AttributeError", ",", "KeyError", ",", "NotFound", ")", ":", "pass" ]
46.119048
[ 0.03225806451612903, 0.028985507246376812, 0, 0.07407407407407407, 0.0625, 0.05, 0.02631578947368421, 0.058823529411764705, 0.045454545454545456, 0, 0.02666666666666667, 0.03, 0, 0.027777777777777776, 0.031914893617021274, 0.16666666666666666, 0.04285714285714286, 0.04285714285714286, 0.03529411764705882, 0.038461538461538464, 0.125, 0, 0.02531645569620253, 0.05128205128205128, 0.02857142857142857, 0.16666666666666666, 0.05263157894736842, 0.05263157894736842, 0.03389830508474576, 0.038461538461538464, 0.125, 0, 0.03278688524590164, 0, 0.029850746268656716, 0.03571428571428571, 0.16666666666666666, 0.046153846153846156, 0.046153846153846156, 0.02666666666666667, 0.038461538461538464, 0.125 ]
def get_dataset(self, key, info): """Load a dataset.""" logger.debug('Reading %s.', key.name) variable = self.nc[key.name] info.update(variable.attrs) info.update(key.to_dict()) info.update(dict(platform_name=self.platform_name, sensor=self.sensor)) variable.attrs = info return variable
[ "def", "get_dataset", "(", "self", ",", "key", ",", "info", ")", ":", "logger", ".", "debug", "(", "'Reading %s.'", ",", "key", ".", "name", ")", "variable", "=", "self", ".", "nc", "[", "key", ".", "name", "]", "info", ".", "update", "(", "variable", ".", "attrs", ")", "info", ".", "update", "(", "key", ".", "to_dict", "(", ")", ")", "info", ".", "update", "(", "dict", "(", "platform_name", "=", "self", ".", "platform_name", ",", "sensor", "=", "self", ".", "sensor", ")", ")", "variable", ".", "attrs", "=", "info", "return", "variable" ]
28.230769
[ 0.030303030303030304, 0.06896551724137931, 0.044444444444444446, 0, 0.05555555555555555, 0, 0.05714285714285714, 0.058823529411764705, 0.05172413793103448, 0.1111111111111111, 0, 0.06896551724137931, 0.08695652173913043 ]
def rcts(self, command, *args, **kwargs):
    '''General function for applying a rolling R function to a time series'''
    cls = self.__class__
    name = kwargs.pop('name', '')
    date = kwargs.pop('date', None)
    data = kwargs.pop('data', None)
    kwargs.pop('bycolumn', None)
    ts = cls(name=name, date=date, data=data)
    ts._ts = self.rc(command, *args, **kwargs)
    return ts
[ "def", "rcts", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "self", ".", "__class__", "name", "=", "kwargs", ".", "pop", "(", "'name'", ",", "''", ")", "date", "=", "kwargs", ".", "pop", "(", "'date'", ",", "None", ")", "data", "=", "kwargs", ".", "pop", "(", "'data'", ",", "None", ")", "kwargs", ".", "pop", "(", "'bycolumn'", ",", "None", ")", "ts", "=", "cls", "(", "name", "=", "name", ",", "date", "=", "date", ",", "data", "=", "data", ")", "ts", ".", "_ts", "=", "self", ".", "rc", "(", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ts" ]
41.9
[ 0.023809523809523808, 0.0125, 0.034482758620689655, 0.05405405405405406, 0.05128205128205128, 0.05128205128205128, 0.05555555555555555, 0.08163265306122448, 0.0196078431372549, 0.11764705882352941 ]
def genome_coverage(genomes, scaffold_coverage, total_bases): """ coverage = (number of bases / length of genome) * 100 """ coverage = {} custom = {} std = {} for genome in genomes: for sequence in parse_fasta(genome): scaffold = sequence[0].split('>')[1].split()[0] coverage, std = sum_coverage(coverage, std, genome, scaffold, sequence, scaffold_coverage) custom = calc_custom(custom, genome, scaffold, sequence, scaffold_coverage, total_bases) std = calc_std(std) custom_std = calc_std(custom) custom_av = {} for genome in custom: custom_av[genome] = [] for sample in custom[genome]: custom_av[genome].append(numpy.mean(sample)) for genome in coverage: print('%s\t%s' % (genome, coverage[genome][0][1])) if total_bases is True: total_bases = calc_total_mapped_bases(coverage) absolute = absolute_abundance(coverage, total_bases) for genome in coverage: calculated = [] for calc in coverage[genome]: calculated.append(calc[0] / calc[1]) coverage[genome] = calculated relative = relative_abundance(coverage) return coverage, std, absolute, relative, custom_av, custom_std
[ "def", "genome_coverage", "(", "genomes", ",", "scaffold_coverage", ",", "total_bases", ")", ":", "coverage", "=", "{", "}", "custom", "=", "{", "}", "std", "=", "{", "}", "for", "genome", "in", "genomes", ":", "for", "sequence", "in", "parse_fasta", "(", "genome", ")", ":", "scaffold", "=", "sequence", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", "coverage", ",", "std", "=", "sum_coverage", "(", "coverage", ",", "std", ",", "genome", ",", "scaffold", ",", "sequence", ",", "scaffold_coverage", ")", "custom", "=", "calc_custom", "(", "custom", ",", "genome", ",", "scaffold", ",", "sequence", ",", "scaffold_coverage", ",", "total_bases", ")", "std", "=", "calc_std", "(", "std", ")", "custom_std", "=", "calc_std", "(", "custom", ")", "custom_av", "=", "{", "}", "for", "genome", "in", "custom", ":", "custom_av", "[", "genome", "]", "=", "[", "]", "for", "sample", "in", "custom", "[", "genome", "]", ":", "custom_av", "[", "genome", "]", ".", "append", "(", "numpy", ".", "mean", "(", "sample", ")", ")", "for", "genome", "in", "coverage", ":", "print", "(", "'%s\\t%s'", "%", "(", "genome", ",", "coverage", "[", "genome", "]", "[", "0", "]", "[", "1", "]", ")", ")", "if", "total_bases", "is", "True", ":", "total_bases", "=", "calc_total_mapped_bases", "(", "coverage", ")", "absolute", "=", "absolute_abundance", "(", "coverage", ",", "total_bases", ")", "for", "genome", "in", "coverage", ":", "calculated", "=", "[", "]", "for", "calc", "in", "coverage", "[", "genome", "]", ":", "calculated", ".", "append", "(", "calc", "[", "0", "]", "/", "calc", "[", "1", "]", ")", "coverage", "[", "genome", "]", "=", "calculated", "relative", "=", "relative_abundance", "(", "coverage", ")", "return", "coverage", ",", "std", ",", "absolute", ",", "relative", ",", "custom_av", ",", "custom_std" ]
35.16129
[ 0.01639344262295082, 0.75, 0.05555555555555555, 0.75, 0.21428571428571427, 0.25, 0.3333333333333333, 0.13043478260869565, 0.07894736842105263, 0.06, 0.043010752688172046, 0.04395604395604396, 0.15, 0.1, 0.2, 0.13636363636363635, 0.125, 0.0967741935483871, 0.06382978723404255, 0.125, 0.057692307692307696, 0.125, 0.061224489795918366, 0.05660377358490566, 0.125, 0.17647058823529413, 0.0967741935483871, 0.07692307692307693, 0.0967741935483871, 0.075, 0.046875 ]
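A toy illustration of the per-genome ratio computed by genome_coverage above (hypothetical numbers; note the code returns mapped bases / length, while the docstring mentions an additional * 100 scaling):

scaffolds = {"s1": (50000, 10000), "s2": (150000, 40000)}  # (mapped bases, length)
total_bases = sum(b for b, _ in scaffolds.values())
total_length = sum(l for _, l in scaffolds.values())
coverage = float(total_bases) / total_length  # 200000 / 50000 = 4.0x average depth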
def get_logs(self, join_newline=True):
    '''get_logs will return the complete history, joined by newline
    (default) or as is.
    '''
    if join_newline:
        return '\n'.join(self.history)
    return self.history
[ "def", "get_logs", "(", "self", ",", "join_newline", "=", "True", ")", ":", "if", "join_newline", ":", "return", "'\\n'", ".", "join", "(", "self", ".", "history", ")", "return", "self", ".", "history" ]
34.428571
[ 0.02631578947368421, 0.027777777777777776, 0.1111111111111111, 0.18181818181818182, 0.08333333333333333, 0.047619047619047616, 0.07407407407407407 ]
def sanitize(self): """Removes sensitive data including file names and dependencies. Returns a Heartbeat. """ if not self.args.hide_file_names: return self if self.entity is None: return self if self.type != 'file': return self if self.should_obfuscate_filename(): self._sanitize_metadata() extension = u(os.path.splitext(self.entity)[1]) self.entity = u('HIDDEN{0}').format(extension) elif self.should_obfuscate_project(): self._sanitize_metadata() return self
[ "def", "sanitize", "(", "self", ")", ":", "if", "not", "self", ".", "args", ".", "hide_file_names", ":", "return", "self", "if", "self", ".", "entity", "is", "None", ":", "return", "self", "if", "self", ".", "type", "!=", "'file'", ":", "return", "self", "if", "self", ".", "should_obfuscate_filename", "(", ")", ":", "self", ".", "_sanitize_metadata", "(", ")", "extension", "=", "u", "(", "os", ".", "path", ".", "splitext", "(", "self", ".", "entity", ")", "[", "1", "]", ")", "self", ".", "entity", "=", "u", "(", "'HIDDEN{0}'", ")", ".", "format", "(", "extension", ")", "elif", "self", ".", "should_obfuscate_project", "(", ")", ":", "self", ".", "_sanitize_metadata", "(", ")", "return", "self" ]
26.130435
[ 0.05263157894736842, 0.027777777777777776, 0, 0.07142857142857142, 0.18181818181818182, 0, 0.04878048780487805, 0.08695652173913043, 0, 0.06451612903225806, 0.08695652173913043, 0, 0.06451612903225806, 0.08695652173913043, 0, 0.045454545454545456, 0.05405405405405406, 0.03389830508474576, 0.034482758620689655, 0.044444444444444446, 0.05405405405405406, 0, 0.10526315789473684 ]
def get_pattern_step_time(self, patternnumber, stepnumber):
    """Get the step time.

    Args:
        * patternnumber (integer): 0-7
        * stepnumber (integer): 0-7

    Returns:
        The step time (int).

    """
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)

    address = _calculateRegisterAddress('time', patternnumber, stepnumber)
    return self.read_register(address, 0)
[ "def", "get_pattern_step_time", "(", "self", ",", "patternnumber", ",", "stepnumber", ")", ":", "_checkPatternNumber", "(", "patternnumber", ")", "_checkStepNumber", "(", "stepnumber", ")", "address", "=", "_calculateRegisterAddress", "(", "'time'", ",", "patternnumber", ",", "stepnumber", ")", "return", "self", ".", "read_register", "(", "address", ",", "0", ")" ]
30.5
[ 0.01694915254237288, 0.06896551724137931, 0, 0.15384615384615385, 0.07142857142857142, 0.07692307692307693, 0.16666666666666666, 0.125, 0.08695652173913043, 0.16666666666666666, 0.18181818181818182, 0.047619047619047616, 0.05555555555555555, 0.25, 0.02564102564102564, 0.044444444444444446 ]
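A hedged sketch of the kind of computation _calculateRegisterAddress performs for get_pattern_step_time above; the base offsets and steps-per-pattern constant below are made up for illustration, not the controller's real register map.

BASES = {'time': 0x0A00, 'setpoint': 0x0B00}  # hypothetical base addresses
STEPS_PER_PATTERN = 8

def calc_register_address(kind, patternnumber, stepnumber):
    # One contiguous block of 8 step registers per pattern (assumed layout).
    return BASES[kind] + STEPS_PER_PATTERN * patternnumber + stepnumber

assert calc_register_address('time', 2, 3) == 0x0A00 + 19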
def iprange(self, *args): """Returns a random address from within the given range of two addresses IPRANGE:start,end %{IPRANGE:10.0.0.0/8,} -> '' """ call_args = list(args) return self.random.choice(IPRange(call_args.pop(0), call_args.pop(0)))
[ "def", "iprange", "(", "self", ",", "*", "args", ")", ":", "call_args", "=", "list", "(", "args", ")", "return", "self", ".", "random", ".", "choice", "(", "IPRange", "(", "call_args", ".", "pop", "(", "0", ")", ",", "call_args", ".", "pop", "(", "0", ")", ")", ")" ]
36.125
[ 0.04, 0.0375, 0.13793103448275862, 0, 0.1111111111111111, 0.18181818181818182, 0.06666666666666667, 0.02564102564102564 ]
def team_abbreviation(self): """ Returns a ``string`` of the team's abbreviation, such as 'DET' for the Detroit Red Wings. """ # For career stats, skip the team abbreviation. if self._season[self._index].lower() == 'career': return None return self._team_abbreviation[self._index]
[ "def", "team_abbreviation", "(", "self", ")", ":", "# For career stats, skip the team abbreviation.", "if", "self", ".", "_season", "[", "self", ".", "_index", "]", ".", "lower", "(", ")", "==", "'career'", ":", "return", "None", "return", "self", ".", "_team_abbreviation", "[", "self", ".", "_index", "]" ]
37.777778
[ 0.03571428571428571, 0.18181818181818182, 0.038461538461538464, 0.07692307692307693, 0.18181818181818182, 0.03636363636363636, 0.03508771929824561, 0.08695652173913043, 0.0392156862745098 ]
def raw(self): """Return raw key. returns: str: raw key """ if self._raw: return text_type(self._raw).strip("\r\n") else: return text_type(base64decode(self._b64encoded)).strip("\r\n")
[ "def", "raw", "(", "self", ")", ":", "if", "self", ".", "_raw", ":", "return", "text_type", "(", "self", ".", "_raw", ")", ".", "strip", "(", "\"\\r\\n\"", ")", "else", ":", "return", "text_type", "(", "base64decode", "(", "self", ".", "_b64encoded", ")", ")", ".", "strip", "(", "\"\\r\\n\"", ")" ]
25.2
[ 0.07142857142857142, 0.07692307692307693, 0, 0.125, 0.08333333333333333, 0.18181818181818182, 0.09523809523809523, 0.03773584905660377, 0.15384615384615385, 0.02702702702702703 ]
def _handle_inspect_reply(self, rep):
    """
    Reimplement call tips to only show signatures, using the same
    style as our Editor and External Console.
    """
    cursor = self._get_cursor()
    info = self._request_info.get('call_tip')
    if info and info.id == rep['parent_header']['msg_id'] and \
            info.pos == cursor.position():
        content = rep['content']
        if content.get('status') == 'ok' and content.get('found', False):
            signature = self.get_signature(content)
            if signature:
                # TODO: Pass the language from the Console to the calltip
                self._control.show_calltip(signature, color='#999999',
                                           is_python=True)
[ "def", "_handle_inspect_reply", "(", "self", ",", "rep", ")", ":", "cursor", "=", "self", ".", "_get_cursor", "(", ")", "info", "=", "self", ".", "_request_info", ".", "get", "(", "'call_tip'", ")", "if", "info", "and", "info", ".", "id", "==", "rep", "[", "'parent_header'", "]", "[", "'msg_id'", "]", "and", "info", ".", "pos", "==", "cursor", ".", "position", "(", ")", ":", "content", "=", "rep", "[", "'content'", "]", "if", "content", ".", "get", "(", "'status'", ")", "==", "'ok'", "and", "content", ".", "get", "(", "'found'", ",", "False", ")", ":", "signature", "=", "self", ".", "get_signature", "(", "content", ")", "if", "signature", ":", "# TODO: Pass the language from the Console to the calltip", "self", ".", "_control", ".", "show_calltip", "(", "signature", ",", "color", "=", "'#999999'", ",", "is_python", "=", "True", ")" ]
48.9375
[ 0.02702702702702703, 0.18181818181818182, 0.028985507246376812, 0.037037037037037035, 0.18181818181818182, 0.05714285714285714, 0.04081632653061224, 0.029850746268656716, 0.075, 0.05555555555555555, 0.025974025974025976, 0.03636363636363636, 0.06896551724137931, 0.025974025974025976, 0.04054054054054054, 0.08064516129032258 ]
def est_kl_divergence(self, other, kernel=None, delta=1e-2): """ Finds the KL divergence between this and another particle distribution by using a kernel density estimator to smooth over the other distribution's particles. :param SMCUpdater other: """ return self._kl_divergence( other.particle_locations, other.particle_weights, kernel, delta )
[ "def", "est_kl_divergence", "(", "self", ",", "other", ",", "kernel", "=", "None", ",", "delta", "=", "1e-2", ")", ":", "return", "self", ".", "_kl_divergence", "(", "other", ".", "particle_locations", ",", "other", ".", "particle_weights", ",", "kernel", ",", "delta", ")" ]
33.384615
[ 0.016666666666666666, 0.18181818181818182, 0.03076923076923077, 0.02666666666666667, 0.05128205128205128, 0, 0.09375, 0.18181818181818182, 0.08571428571428572, 0.05405405405405406, 0.05714285714285714, 0.08, 0.3333333333333333 ]
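A minimal numpy sketch of the idea behind est_kl_divergence above: smooth the other distribution's weighted particles with a Gaussian kernel, then average log(p/q) over this distribution's particles. This is a 1-D simplification under stated assumptions, not QInfer's actual _kl_divergence.

import numpy as np

def kde(x, locs, weights, h):
    # Weighted Gaussian kernel density estimate at points x (1-D).
    d = (x[:, None] - locs[None, :]) / h
    return (weights[None, :] * np.exp(-0.5 * d ** 2)).sum(axis=1) / (h * np.sqrt(2 * np.pi))

def est_kl(p_locs, p_w, q_locs, q_w, h=0.1, delta=1e-2):
    p = kde(p_locs, p_locs, p_w, h) + delta  # this distribution's density
    q = kde(p_locs, q_locs, q_w, h) + delta  # KDE-smoothed other distribution
    return np.sum(p_w * np.log(p / q))       # E_p[log p/q]

rng = np.random.default_rng(0)
a, b = rng.normal(0.0, 1.0, 500), rng.normal(0.5, 1.0, 500)
w = np.full(500, 1.0 / 500)
print(est_kl(a, w, b, w))  # small positive value for nearby Gaussians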
def _delete_keys(dct, keys): """Returns a copy of dct without `keys` keys """ c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c
[ "def", "_delete_keys", "(", "dct", ",", "keys", ")", ":", "c", "=", "deepcopy", "(", "dct", ")", "assert", "isinstance", "(", "keys", ",", "list", ")", "for", "k", "in", "keys", ":", "c", ".", "pop", "(", "k", ")", "return", "c" ]
22.875
[ 0.03571428571428571, 0.041666666666666664, 0.2857142857142857, 0.09523809523809523, 0.06060606060606061, 0.1111111111111111, 0.125, 0.16666666666666666 ]
def list_all(self, name=None, visibility=None, member_status=None, owner=None, tag=None, status=None, size_min=None, size_max=None, sort_key=None, sort_dir=None): """ Returns all of the images in one call, rather than in paginated batches. """ def strip_version(uri): """ The 'next' uri contains a redundant version number. We need to strip it to use in the method_get() call. """ pos = uri.find("/images") return uri[pos:] obj_class = self.resource_class resp, resp_body = self.list(name=name, visibility=visibility, member_status=member_status, owner=owner, tag=tag, status=status, size_min=size_min, size_max=size_max, sort_key=sort_key, sort_dir=sort_dir, return_raw=True) data = resp_body.get(self.plural_response_key, resp_body) next_uri = strip_version(resp_body.get("next", "")) ret = [obj_class(manager=self, info=res) for res in data if res] while next_uri: resp, resp_body = self.api.method_get(next_uri) data = resp_body.get(self.plural_response_key, resp_body) next_uri = strip_version(resp_body.get("next", "")) ret.extend([obj_class(manager=self, info=res) for res in data if res]) return ret
[ "def", "list_all", "(", "self", ",", "name", "=", "None", ",", "visibility", "=", "None", ",", "member_status", "=", "None", ",", "owner", "=", "None", ",", "tag", "=", "None", ",", "status", "=", "None", ",", "size_min", "=", "None", ",", "size_max", "=", "None", ",", "sort_key", "=", "None", ",", "sort_dir", "=", "None", ")", ":", "def", "strip_version", "(", "uri", ")", ":", "\"\"\"\n The 'next' uri contains a redundant version number. We need to\n strip it to use in the method_get() call.\n \"\"\"", "pos", "=", "uri", ".", "find", "(", "\"/images\"", ")", "return", "uri", "[", "pos", ":", "]", "obj_class", "=", "self", ".", "resource_class", "resp", ",", "resp_body", "=", "self", ".", "list", "(", "name", "=", "name", ",", "visibility", "=", "visibility", ",", "member_status", "=", "member_status", ",", "owner", "=", "owner", ",", "tag", "=", "tag", ",", "status", "=", "status", ",", "size_min", "=", "size_min", ",", "size_max", "=", "size_max", ",", "sort_key", "=", "sort_key", ",", "sort_dir", "=", "sort_dir", ",", "return_raw", "=", "True", ")", "data", "=", "resp_body", ".", "get", "(", "self", ".", "plural_response_key", ",", "resp_body", ")", "next_uri", "=", "strip_version", "(", "resp_body", ".", "get", "(", "\"next\"", ",", "\"\"", ")", ")", "ret", "=", "[", "obj_class", "(", "manager", "=", "self", ",", "info", "=", "res", ")", "for", "res", "in", "data", "if", "res", "]", "while", "next_uri", ":", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "method_get", "(", "next_uri", ")", "data", "=", "resp_body", ".", "get", "(", "self", ".", "plural_response_key", ",", "resp_body", ")", "next_uri", "=", "strip_version", "(", "resp_body", ".", "get", "(", "\"next\"", ",", "\"\"", ")", ")", "ret", ".", "extend", "(", "[", "obj_class", "(", "manager", "=", "self", ",", "info", "=", "res", ")", "for", "res", "in", "data", "if", "res", "]", ")", "return", "ret" ]
46
[ 0.030303030303030304, 0.09210526315789473, 0.11904761904761904, 0.18181818181818182, 0.0375, 0.18181818181818182, 0, 0.06451612903225806, 0.13333333333333333, 0.02702702702702703, 0.03773584905660377, 0.13333333333333333, 0.05405405405405406, 0.07142857142857142, 0, 0.05128205128205128, 0.043478260869565216, 0.07575757575757576, 0.07352941176470588, 0.08571428571428572, 0.03076923076923077, 0.03389830508474576, 0.027777777777777776, 0.08695652173913043, 0.03389830508474576, 0.028985507246376812, 0.031746031746031744, 0.05263157894736842, 0.06818181818181818, 0.1111111111111111 ]
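The pagination pattern in list_all generalizes to any follow-the-"next"-link API: fetch a page, collect results, follow the next link until it is empty. A minimal sketch with a hypothetical fetch_page callable:

def list_everything(fetch_page, first_uri):
    # fetch_page is assumed to return a parsed JSON body (dict).
    results, uri = [], first_uri
    while uri:
        body = fetch_page(uri)
        results.extend(body.get("images", []))
        uri = body.get("next", "")  # empty string ends the loop
    return results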
def storages_timeseries(self): """ Returns a dataframe with storage time series. Returns ------- :pandas:`pandas.DataFrame<dataframe>` Dataframe containing time series of all storages installed in the MV grid and LV grids. Index of the dataframe is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the storage representatives. """ storages_p = pd.DataFrame() storages_q = pd.DataFrame() grids = [self.network.mv_grid] + list(self.network.mv_grid.lv_grids) for grid in grids: for storage in grid.graph.nodes_by_attribute('storage'): ts = storage.timeseries storages_p[repr(storage)] = ts.p storages_q[repr(storage)] = ts.q return storages_p, storages_q
[ "def", "storages_timeseries", "(", "self", ")", ":", "storages_p", "=", "pd", ".", "DataFrame", "(", ")", "storages_q", "=", "pd", ".", "DataFrame", "(", ")", "grids", "=", "[", "self", ".", "network", ".", "mv_grid", "]", "+", "list", "(", "self", ".", "network", ".", "mv_grid", ".", "lv_grids", ")", "for", "grid", "in", "grids", ":", "for", "storage", "in", "grid", ".", "graph", ".", "nodes_by_attribute", "(", "'storage'", ")", ":", "ts", "=", "storage", ".", "timeseries", "storages_p", "[", "repr", "(", "storage", ")", "]", "=", "ts", ".", "p", "storages_q", "[", "repr", "(", "storage", ")", "]", "=", "ts", ".", "q", "return", "storages_p", ",", "storages_q" ]
35
[ 0.03333333333333333, 0.18181818181818182, 0.03773584905660377, 0, 0.13333333333333333, 0.13333333333333333, 0.15555555555555556, 0, 0.025974025974025976, 0.03278688524590164, 0.0945945945945946, 0.05555555555555555, 0, 0.18181818181818182, 0.05714285714285714, 0.05714285714285714, 0.02631578947368421, 0.07692307692307693, 0.029411764705882353, 0.05128205128205128, 0.041666666666666664, 0.041666666666666664, 0, 0.05405405405405406 ]
def dskmi2(vrtces, plates, finscl, corscl, worksz, voxpsz, voxlsz, makvtl, spxisz): """ Make spatial index for a DSK type 2 segment. The index is returned as a pair of arrays, one of type int and one of type float. These arrays are suitable for use with the DSK type 2 writer dskw02. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskmi2_c.html :param vrtces: Vertices :type vrtces: NxM-Element Array of floats :param plates: Plates :type plates: NxM-Element Array of ints :param finscl: Fine voxel scale :type finscl: float :param corscl: Coarse voxel scale :type corscl: int :param worksz: Workspace size :type worksz: int :param voxpsz: Voxel plate pointer array size :type voxpsz: int :param voxlsz: Voxel plate list array size :type voxlsz: int :param makvtl: Vertex plate list flag :type makvtl: bool :param spxisz: Spatial index integer component size :type spxisz: int :return: double precision and integer components of the spatial index of the segment. :rtype: tuple """ nv = ctypes.c_int(len(vrtces)) vrtces = stypes.toDoubleMatrix(vrtces) np = ctypes.c_int(len(plates)) plates = stypes.toIntMatrix(plates) finscl = ctypes.c_double(finscl) corscl = ctypes.c_int(corscl) worksz = ctypes.c_int(worksz) voxpsz = ctypes.c_int(voxpsz) voxlsz = ctypes.c_int(voxlsz) makvtl = ctypes.c_int(makvtl) spxisz = ctypes.c_int(spxisz) work = stypes.emptyIntMatrix(2, worksz) spaixd = stypes.emptyDoubleVector(10) # SPICE_DSK02_SPADSZ spaixi = stypes.emptyIntVector(spxisz) libspice.dskmi2_c(nv, vrtces, np, plates, finscl, corscl, worksz, voxpsz, voxlsz, makvtl, spxisz, work, spaixd, spaixi) return stypes.cVectorToPython(spaixd), stypes.cVectorToPython(spaixi)
[ "def", "dskmi2", "(", "vrtces", ",", "plates", ",", "finscl", ",", "corscl", ",", "worksz", ",", "voxpsz", ",", "voxlsz", ",", "makvtl", ",", "spxisz", ")", ":", "nv", "=", "ctypes", ".", "c_int", "(", "len", "(", "vrtces", ")", ")", "vrtces", "=", "stypes", ".", "toDoubleMatrix", "(", "vrtces", ")", "np", "=", "ctypes", ".", "c_int", "(", "len", "(", "plates", ")", ")", "plates", "=", "stypes", ".", "toIntMatrix", "(", "plates", ")", "finscl", "=", "ctypes", ".", "c_double", "(", "finscl", ")", "corscl", "=", "ctypes", ".", "c_int", "(", "corscl", ")", "worksz", "=", "ctypes", ".", "c_int", "(", "worksz", ")", "voxpsz", "=", "ctypes", ".", "c_int", "(", "voxpsz", ")", "voxlsz", "=", "ctypes", ".", "c_int", "(", "voxlsz", ")", "makvtl", "=", "ctypes", ".", "c_int", "(", "makvtl", ")", "spxisz", "=", "ctypes", ".", "c_int", "(", "spxisz", ")", "work", "=", "stypes", ".", "emptyIntMatrix", "(", "2", ",", "worksz", ")", "spaixd", "=", "stypes", ".", "emptyDoubleVector", "(", "10", ")", "# SPICE_DSK02_SPADSZ", "spaixi", "=", "stypes", ".", "emptyIntVector", "(", "spxisz", ")", "libspice", ".", "dskmi2_c", "(", "nv", ",", "vrtces", ",", "np", ",", "plates", ",", "finscl", ",", "corscl", ",", "worksz", ",", "voxpsz", ",", "voxlsz", ",", "makvtl", ",", "spxisz", ",", "work", ",", "spaixd", ",", "spaixi", ")", "return", "stypes", ".", "cVectorToPython", "(", "spaixd", ")", ",", "stypes", ".", "cVectorToPython", "(", "spaixi", ")" ]
39.26087
[ 0.024096385542168676, 0.2857142857142857, 0.02857142857142857, 0.03571428571428571, 0.03125, 0.1111111111111111, 0, 0.0410958904109589, 0, 0.1111111111111111, 0.06666666666666667, 0.12, 0.06976744186046512, 0.08571428571428572, 0.13043478260869565, 0.08108108108108109, 0.14285714285714285, 0.09090909090909091, 0.14285714285714285, 0.061224489795918366, 0.14285714285714285, 0.06521739130434782, 0.14285714285714285, 0.07317073170731707, 0.13636363636363635, 0.05454545454545454, 0.14285714285714285, 0.0449438202247191, 0.17647058823529413, 0.2857142857142857, 0.07894736842105263, 0.047619047619047616, 0.07894736842105263, 0.05128205128205128, 0.05555555555555555, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0.06666666666666667, 0.04838709677419355, 0.047619047619047616, 0.024390243902439025, 0.0273972602739726 ]
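A hedged usage sketch for dskmi2 above, indexing a single-plate "mesh"; the scale and workspace sizes are illustrative guesses, not tuned values:

vrtces = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
plates = [[1, 2, 3]]  # DSK plate vertex indices are 1-based
finscl, corscl = 1.0, 2
worksz, voxpsz, voxlsz = 1000, 1000, 1000
makvtl, spxisz = True, 10000
spaixd, spaixi = dskmi2(vrtces, plates, finscl, corscl,
                        worksz, voxpsz, voxlsz, makvtl, spxisz)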
def get_bbands(self, symbol, interval='daily', time_period=20, series_type='close',
               nbdevup=None, nbdevdn=None, matype=None):
    """ Return the bollinger bands values in two json objects as data and
    meta_data. It raises ValueError when problems arise

    Keyword Arguments:
        symbol:  the symbol for the equity we want to get its data
        interval:  time interval between two consecutive values,
            supported values are '1min', '5min', '15min', '30min', '60min',
            'daily', 'weekly', 'monthly' (default 'daily')
        series_type:  The desired price type in the time series. Four types
            are supported: 'close', 'open', 'high', 'low' (default 'close')
        nbdevup:  The standard deviation multiplier of the upper band.
            Positive integers are accepted (default=2)
        nbdevdn:  The standard deviation multiplier of the lower band.
            Positive integers are accepted (default=2)
        matype :  Moving average type. By default, matype=0. Integers 0 - 8
            are accepted (see the mappings below) or the string name of the
            moving average type can also be used.

            * 0 = Simple Moving Average (SMA),
            * 1 = Exponential Moving Average (EMA),
            * 2 = Weighted Moving Average (WMA),
            * 3 = Double Exponential Moving Average (DEMA),
            * 4 = Triple Exponential Moving Average (TEMA),
            * 5 = Triangular Moving Average (TRIMA),
            * 6 = T3 Moving Average,
            * 7 = Kaufman Adaptive Moving Average (KAMA),
            * 8 = MESA Adaptive Moving Average (MAMA)
    """
    _FUNCTION_KEY = "BBANDS"
    return _FUNCTION_KEY, 'Technical Analysis: BBANDS', 'Meta Data'
[ "def", "get_bbands", "(", "self", ",", "symbol", ",", "interval", "=", "'daily'", ",", "time_period", "=", "20", ",", "series_type", "=", "'close'", ",", "nbdevup", "=", "None", ",", "nbdevdn", "=", "None", ",", "matype", "=", "None", ")", ":", "_FUNCTION_KEY", "=", "\"BBANDS\"", "return", "_FUNCTION_KEY", ",", "'Technical Analysis: BBANDS'", ",", "'Meta Data'" ]
56.090909
[ 0.03571428571428571, 0.11666666666666667, 0.038461538461538464, 0.02564102564102564, 0.15384615384615385, 0, 0.07692307692307693, 0.02857142857142857, 0.029850746268656716, 0.03409090909090909, 0.057692307692307696, 0.02531645569620253, 0.02531645569620253, 0.03614457831325301, 0.05, 0.03614457831325301, 0.05, 0.0625, 0.047619047619047616, 0.034482758620689655, 0, 0.06, 0.05454545454545454, 0.057692307692307696, 0.047619047619047616, 0.047619047619047616, 0.05357142857142857, 0.05, 0.04918032786885246, 0.05263157894736842, 0.18181818181818182, 0.0625, 0.028169014084507043 ]
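For reference, the quantity the BBANDS endpoint computes server-side is a rolling mean plus/minus a scaled rolling standard deviation; a plain-pandas analogue for the default SMA case (matype=0), offered as a sketch rather than Alpha Vantage's exact implementation:

import pandas as pd

def bbands(close, time_period=20, nbdevup=2, nbdevdn=2):
    mid = close.rolling(time_period).mean()
    sd = close.rolling(time_period).std()
    return mid + nbdevup * sd, mid, mid - nbdevdn * sd

prices = pd.Series(range(1, 61), dtype=float)
upper, middle, lower = bbands(prices)  # NaN until the 20-sample window fills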
def _noneload(l: Loader, value, type_) -> None: """ Loads a value that can only be None, so it fails if it isn't """ if value is None: return None raise TypedloadValueError('Not None', value=value, type_=type_)
[ "def", "_noneload", "(", "l", ":", "Loader", ",", "value", ",", "type_", ")", "->", "None", ":", "if", "value", "is", "None", ":", "return", "None", "raise", "TypedloadValueError", "(", "'Not None'", ",", "value", "=", "value", ",", "type_", "=", "type_", ")" ]
29.375
[ 0.0425531914893617, 0.2857142857142857, 0.05, 0.07407407407407407, 0.2857142857142857, 0.09523809523809523, 0.10526315789473684, 0.029850746268656716 ]
def route_method(method_name, extra_part=False):
    """Custom handler routing decorator.

    Attaches the HTTP method name to a web handler callable as an attribute.

    Args:
        method_name (str): HTTP method name (e.g. GET, POST)
        extra_part (bool): Indicates if the wrapped callable's name should
            be part of the actual endpoint.

    Returns:
        A wrapped handler callable.

    examples:
        >>> @route_method('GET')
        ... def method():
        ...     return "Hello!"
        ...
        >>> method.http_method
        'GET'
        >>> method.url_extra_part
        None
    """
    def wrapper(callable_obj):
        if method_name.lower() not in DEFAULT_ROUTES:
            raise HandlerHTTPMethodError(
                'Invalid http method in method: {}'.format(method_name)
            )
        callable_obj.http_method = method_name.upper()
        callable_obj.url_extra_part = callable_obj.__name__ if extra_part\
            else None
        return classmethod(callable_obj)
    return wrapper
[ "def", "route_method", "(", "method_name", ",", "extra_part", "=", "False", ")", ":", "def", "wrapper", "(", "callable_obj", ")", ":", "if", "method_name", ".", "lower", "(", ")", "not", "in", "DEFAULT_ROUTES", ":", "raise", "HandlerHTTPMethodError", "(", "'Invalid http method in method: {}'", ".", "format", "(", "method_name", ")", ")", "callable_obj", ".", "http_method", "=", "method_name", ".", "upper", "(", ")", "callable_obj", ".", "url_extra_part", "=", "callable_obj", ".", "__name__", "if", "extra_part", "else", "None", "return", "classmethod", "(", "callable_obj", ")", "return", "wrapper" ]
29.142857
[ 0.020833333333333332, 0.05, 0.029850746268656716, 0, 0.2222222222222222, 0.06779661016949153, 0.038461538461538464, 0.06, 0, 0.16666666666666666, 0.05714285714285714, 0, 0.15384615384615385, 0.125, 0.08, 0.0967741935483871, 0.18181818181818182, 0.1, 0.15384615384615385, 0.09090909090909091, 0.16666666666666666, 0.2857142857142857, 0.06666666666666667, 0.03773584905660377, 0.07317073170731707, 0.028169014084507043, 0.23076923076923078, 0, 0.037037037037037035, 0, 0.02702702702702703, 0.09523809523809523, 0, 0.05, 0.1111111111111111 ]
def tangent_bbox_intersection(first, second, intersections):
    r"""Check if two curves with tangent bounding boxes intersect.

    .. note::

       This is a helper for :func:`intersect_one_round`. These
       functions are used (directly or indirectly) by
       :func:`_all_intersections` exclusively, and that function has a
       Fortran equivalent.

    If the bounding boxes are tangent, intersection can only occur along
    that tangency.

    If the curve is **not** a line, the **only** way the curve can touch
    the bounding box is at the endpoints. To see this, consider the
    component

    .. math::

       x(s) = \sum_j W_j x_j.

    Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
    is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
    interior :math:`s`

    .. math::

       x(s) < \sum_j W_j M = M.

    If all :math:`x_j = M`, then :math:`B(s)` falls on the line
    :math:`x = M`. (A similar argument holds for the other three
    component-extrema types.)

    .. note::

       This function assumes callers will not pass curves that can be
       linearized / are linear. In :func:`_all_intersections`, curves are
       pre-processed to do any linearization before the subdivision /
       intersection process begins.

    Args:
        first (SubdividedCurve): First curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        second (SubdividedCurve): Second curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        intersections (list): A list of already encountered intersections.
            If these curves intersect at their tangency, then those
            intersections will be added to this list.
    """
    node_first1 = first.nodes[:, 0]
    node_first2 = first.nodes[:, -1]
    node_second1 = second.nodes[:, 0]
    node_second2 = second.nodes[:, -1]
    endpoint_check(
        first, node_first1, 0.0, second, node_second1, 0.0, intersections
    )
    endpoint_check(
        first, node_first1, 0.0, second, node_second2, 1.0, intersections
    )
    endpoint_check(
        first, node_first2, 1.0, second, node_second1, 0.0, intersections
    )
    endpoint_check(
        first, node_first2, 1.0, second, node_second2, 1.0, intersections
    )
[ "def", "tangent_bbox_intersection", "(", "first", ",", "second", ",", "intersections", ")", ":", "node_first1", "=", "first", ".", "nodes", "[", ":", ",", "0", "]", "node_first2", "=", "first", ".", "nodes", "[", ":", ",", "-", "1", "]", "node_second1", "=", "second", ".", "nodes", "[", ":", ",", "0", "]", "node_second2", "=", "second", ".", "nodes", "[", ":", ",", "-", "1", "]", "endpoint_check", "(", "first", ",", "node_first1", ",", "0.0", ",", "second", ",", "node_second1", ",", "0.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first1", ",", "0.0", ",", "second", ",", "node_second2", ",", "1.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first2", ",", "1.0", ",", "second", ",", "node_second1", ",", "0.0", ",", "intersections", ")", "endpoint_check", "(", "first", ",", "node_first2", ",", "1.0", ",", "second", ",", "node_second2", ",", "1.0", ",", "intersections", ")" ]
34.046154
[ 0.016666666666666666, 0.030303030303030304, 0, 0.23076923076923078, 0, 0.11290322580645161, 0.07547169811320754, 0.08571428571428572, 0.11538461538461539, 0, 0.03636363636363636, 0.05714285714285714, 0, 0.05555555555555555, 0.029850746268656716, 0.15384615384615385, 0, 0.23076923076923078, 0, 0.10344827586206896, 0, 0.1267605633802817, 0.13432835820895522, 0.2727272727272727, 0, 0.23076923076923078, 0, 0.0967741935483871, 0, 0.14285714285714285, 0.09375, 0.10344827586206896, 0, 0.23076923076923078, 0, 0.043478260869565216, 0.10144927536231885, 0.05084745762711865, 0.061224489795918366, 0, 0.2222222222222222, 0.06756756756756757, 0.21212121212121213, 0.06578947368421052, 0.21212121212121213, 0.05084745762711865, 0.028169014084507043, 0.03125, 0.2857142857142857, 0.05714285714285714, 0.05555555555555555, 0.05405405405405406, 0.05263157894736842, 0.15789473684210525, 0.0273972602739726, 0.6, 0.15789473684210525, 0.0273972602739726, 0.6, 0.15789473684210525, 0.0273972602739726, 0.6, 0.15789473684210525, 0.0273972602739726, 0.6 ]
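The convexity argument in the docstring above is easy to check numerically: for interior s, a Bezier component is a strict convex combination of its control values, so it stays strictly below the maximum whenever some control value is smaller. A small self-contained check:

import numpy as np
from scipy.special import comb

def bezier_component(x, s):
    # x(s) = sum_j W_j x_j with Bernstein weights W_j > 0 on (0, 1).
    n = len(x) - 1
    w = np.array([comb(n, j) * s ** j * (1 - s) ** (n - j) for j in range(n + 1)])
    return w @ x

x = np.array([0.0, 2.0, 3.0, 3.0])  # x_0 < max(x_j) = 3
for s in np.linspace(0.01, 0.99, 9):
    assert bezier_component(x, s) < 3.0  # strict inequality in the interior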
def __get_probable_center(self, distances, probabilities):
    """!
    @brief Calculates the next probable center, considering the requested number of candidates.

    @param[in] distances (array_like): Distances from each point to closest center.
    @param[in] probabilities (array_like): Cumulative probabilities of being center of each point.

    @return (uint) Index point that is next initialized center.

    """

    index_best_candidate = -1
    for _ in range(self.__candidates):
        candidate_probability = random.random()
        index_candidate = 0

        for index_object in range(len(probabilities)):
            if candidate_probability < probabilities[index_object]:
                index_candidate = index_object
                break

        if index_best_candidate == -1:
            index_best_candidate = next(iter(self.__free_indexes))
        elif distances[index_best_candidate] < distances[index_candidate]:
            index_best_candidate = index_candidate

    return index_best_candidate
[ "def", "__get_probable_center", "(", "self", ",", "distances", ",", "probabilities", ")", ":", "index_best_candidate", "=", "-", "1", "for", "_", "in", "range", "(", "self", ".", "__candidates", ")", ":", "candidate_probability", "=", "random", ".", "random", "(", ")", "index_candidate", "=", "0", "for", "index_object", "in", "range", "(", "len", "(", "probabilities", ")", ")", ":", "if", "candidate_probability", "<", "probabilities", "[", "index_object", "]", ":", "index_candidate", "=", "index_object", "break", "if", "index_best_candidate", "==", "-", "1", ":", "index_best_candidate", "=", "next", "(", "iter", "(", "self", ".", "__free_indexes", ")", ")", "elif", "distances", "[", "index_best_candidate", "]", "<", "distances", "[", "index_candidate", "]", ":", "index_best_candidate", "=", "index_candidate", "return", "index_best_candidate" ]
39.185185
[ 0.017241379310344827, 0.16666666666666666, 0.037037037037037035, 0, 0.05747126436781609, 0.049019607843137254, 0, 0.029850746268656716, 0, 0.18181818181818182, 0, 0.06060606060606061, 0.047619047619047616, 0.0392156862745098, 0.06451612903225806, 0, 0.034482758620689655, 0.028169014084507043, 0.04, 0.08, 0, 0.047619047619047616, 0.02857142857142857, 0.02564102564102564, 0.037037037037037035, 0, 0.05714285714285714 ]
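A minimal sketch of the k-means++-style roulette selection performed by __get_probable_center above: draw a uniform random number and take the first index whose cumulative probability exceeds it.

import random

def pick_index(cumulative_probabilities):
    r = random.random()
    for index, threshold in enumerate(cumulative_probabilities):
        if r < threshold:
            return index
    return len(cumulative_probabilities) - 1  # guard for r close to 1.0

random.seed(1)
print(pick_index([0.1, 0.4, 1.0]))  # indexes carrying more mass win more often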
def run_flag_hw(in_prefix, in_type, out_prefix, base_dir, options):
    """Runs step12 (flag HW).

    :param in_prefix: the prefix of the input files.
    :param in_type: the type of the input files.
    :param out_prefix: the output prefix.
    :param base_dir: the output directory.
    :param options: the options needed.

    :type in_prefix: str
    :type in_type: str
    :type out_prefix: str
    :type base_dir: str
    :type options: list

    :returns: a :py:class:`_StepResult` containing the prefix of the output
              files (the input prefix for the next script) and the type of
              the output files (``bfile``).

    This function calls the :py:mod:`pyGenClean.FlagHW.flag_hw` module. The
    required file type for this module is ``bfile``, hence the need to use
    the :py:func:`check_input_files` to check if the input file type is the
    good one, or to create it if needed.

    .. note::
        The :py:mod:`pyGenClean.FlagHW.flag_hw` module doesn't return usable
        output files. Hence, this function returns the input file prefix and
        its type.

    """
    # Creating the output directory
    os.mkdir(out_prefix)

    # We know we need bfile
    required_type = "bfile"
    check_input_files(in_prefix, in_type, required_type)

    # We need to inject the name of the input file and the name of the output
    # prefix
    script_prefix = os.path.join(out_prefix, "flag_hw")
    options += ["--{}".format(required_type), in_prefix,
                "--out", script_prefix]

    # We run the script
    try:
        flag_hw.main(options)
    except flag_hw.ProgramError as e:
        msg = "flag_hw: {}".format(e)
        raise ProgramError(msg)

    # Finding the two files containing the list of flagged markers
    filenames = glob(script_prefix + ".snp_flag_threshold_[0-9]*")
    thresholds = {}
    for filename in filenames:
        # Finding the threshold of the file
        threshold = re.sub(
            r"^flag_hw.snp_flag_threshold_",
            "",
            os.path.basename(filename),
        )

        # Counting the number of markers in the file
        nb_markers = None
        with open(filename, "r") as i_file:
            nb_markers = len(i_file.read().splitlines())

        # Saving the values
        thresholds[threshold] = (nb_markers, filename)

    # We create the LaTeX summary
    latex_file = os.path.join(script_prefix + ".summary.tex")
    try:
        with open(latex_file, "w") as o_file:
            print >>o_file, latex_template.subsection(
                flag_hw.pretty_name
            )

            # Data to write
            sorted_keys = sorted(thresholds.keys(), key=float)
            text = (
                "Markers which failed Hardy-Weinberg equilibrium test (using "
                "Plink) were flagged. A total of {:,d} marker{} failed with a "
                "threshold of {}. A total of {:,d} marker{} failed with a "
                "threshold of {}. For a total list, check the files {} and "
                "{}, respectively.".format(
                    thresholds[sorted_keys[0]][0],
                    "s" if thresholds[sorted_keys[0]][0] - 1 > 1 else "",
                    latex_template.format_numbers(sorted_keys[0]),
                    thresholds[sorted_keys[1]][0],
                    "s" if thresholds[sorted_keys[1]][0] - 1 > 1 else "",
                    latex_template.format_numbers(sorted_keys[1]),
                    latex_template.texttt(
                        latex_template.sanitize_tex(os.path.basename(
                            thresholds[sorted_keys[0]][1],
                        )),
                    ),
                    latex_template.texttt(
                        latex_template.sanitize_tex(os.path.basename(
                            thresholds[sorted_keys[1]][1],
                        )),
                    ),
                )
            )
            print >>o_file, latex_template.wrap_lines(text)

    except IOError:
        msg = "{}: cannot write LaTeX summary".format(latex_file)
        raise ProgramError(msg)

    # Writing the summary results
    with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
        print >>o_file, "# {}".format(script_prefix)
        print >>o_file, "Number of markers flagged for HW"
        print >>o_file, " - {}\t{:,d}".format(
            sorted_keys[0],
            thresholds[sorted_keys[0]][0],
        )
        print >>o_file, " - {}\t{:,d}".format(
            sorted_keys[1],
            thresholds[sorted_keys[1]][0],
        )
        print >>o_file, "---"

    # We know this step doesn't produce a new data set, so we return the old
    # prefix and the old in_type
    return _StepResult(
        next_file=in_prefix,
        next_file_type=required_type,
        latex_summary=latex_file,
        description=flag_hw.desc,
        long_description=flag_hw.long_desc,
        graph_path=None,
    )
[ "def", "run_flag_hw", "(", "in_prefix", ",", "in_type", ",", "out_prefix", ",", "base_dir", ",", "options", ")", ":", "# Creating the output directory", "os", ".", "mkdir", "(", "out_prefix", ")", "# We know we need bfile", "required_type", "=", "\"bfile\"", "check_input_files", "(", "in_prefix", ",", "in_type", ",", "required_type", ")", "# We need to inject the name of the input file and the name of the output", "# prefix", "script_prefix", "=", "os", ".", "path", ".", "join", "(", "out_prefix", ",", "\"flag_hw\"", ")", "options", "+=", "[", "\"--{}\"", ".", "format", "(", "required_type", ")", ",", "in_prefix", ",", "\"--out\"", ",", "script_prefix", "]", "# We run the script", "try", ":", "flag_hw", ".", "main", "(", "options", ")", "except", "flag_hw", ".", "ProgramError", "as", "e", ":", "msg", "=", "\"flag_hw: {}\"", ".", "format", "(", "e", ")", "raise", "ProgramError", "(", "msg", ")", "# Finding the two files containing the list of flagged markers", "filenames", "=", "glob", "(", "script_prefix", "+", "\".snp_flag_threshold_[0-9]*\"", ")", "thresholds", "=", "{", "}", "for", "filename", "in", "filenames", ":", "# Finding the threshold of the file", "threshold", "=", "re", ".", "sub", "(", "r\"^flag_hw.snp_flag_threshold_\"", ",", "\"\"", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", ")", "# Counting the number of markers in the file", "nb_markers", "=", "None", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "i_file", ":", "nb_markers", "=", "len", "(", "i_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", ")", "# Saving the values", "thresholds", "[", "threshold", "]", "=", "(", "nb_markers", ",", "filename", ")", "# We create the LaTeX summary", "latex_file", "=", "os", ".", "path", ".", "join", "(", "script_prefix", "+", "\".summary.tex\"", ")", "try", ":", "with", "open", "(", "latex_file", ",", "\"w\"", ")", "as", "o_file", ":", "print", ">>", "o_file", ",", "latex_template", ".", "subsection", "(", "flag_hw", ".", "pretty_name", ")", "# Data to write", "sorted_keys", "=", "sorted", "(", "thresholds", ".", "keys", "(", ")", ",", "key", "=", "float", ")", "text", "=", "(", "\"Markers which failed Hardy-Weinberg equilibrium test (using \"", "\"Plink) were flagged. A total of {:,d} marker{} failed with a \"", "\"threshold of {}. A total of {:,d} marker{} failed with a \"", "\"threshold of {}. 
For a total list, check the files {} and \"", "\"{}, respectively.\"", ".", "format", "(", "thresholds", "[", "sorted_keys", "[", "0", "]", "]", "[", "0", "]", ",", "\"s\"", "if", "thresholds", "[", "sorted_keys", "[", "0", "]", "]", "[", "0", "]", "-", "1", ">", "1", "else", "\"\"", ",", "latex_template", ".", "format_numbers", "(", "sorted_keys", "[", "0", "]", ")", ",", "thresholds", "[", "sorted_keys", "[", "1", "]", "]", "[", "0", "]", ",", "\"s\"", "if", "thresholds", "[", "sorted_keys", "[", "1", "]", "]", "[", "0", "]", "-", "1", ">", "1", "else", "\"\"", ",", "latex_template", ".", "format_numbers", "(", "sorted_keys", "[", "1", "]", ")", ",", "latex_template", ".", "texttt", "(", "latex_template", ".", "sanitize_tex", "(", "os", ".", "path", ".", "basename", "(", "thresholds", "[", "sorted_keys", "[", "0", "]", "]", "[", "1", "]", ",", ")", ")", ",", ")", ",", "latex_template", ".", "texttt", "(", "latex_template", ".", "sanitize_tex", "(", "os", ".", "path", ".", "basename", "(", "thresholds", "[", "sorted_keys", "[", "1", "]", "]", "[", "1", "]", ",", ")", ")", ",", ")", ",", ")", ")", "print", ">>", "o_file", ",", "latex_template", ".", "wrap_lines", "(", "text", ")", "except", "IOError", ":", "msg", "=", "\"{}: cannot write LaTeX summary\"", ".", "format", "(", "latex_file", ")", "raise", "ProgramError", "(", "msg", ")", "# Writing the summary results", "with", "open", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"results_summary.txt\"", ")", ",", "\"a\"", ")", "as", "o_file", ":", "print", ">>", "o_file", ",", "\"# {}\"", ".", "format", "(", "script_prefix", ")", "print", ">>", "o_file", ",", "\"Number of markers flagged for HW\"", "print", ">>", "o_file", ",", "\" - {}\\t{:,d}\"", ".", "format", "(", "sorted_keys", "[", "0", "]", ",", "thresholds", "[", "sorted_keys", "[", "0", "]", "]", "[", "0", "]", ",", ")", "print", ">>", "o_file", ",", "\" - {}\\t{:,d}\"", ".", "format", "(", "sorted_keys", "[", "1", "]", ",", "thresholds", "[", "sorted_keys", "[", "1", "]", "]", "[", "0", "]", ",", ")", "print", ">>", "o_file", ",", "\"---\"", "# We know this step doesn't produce an new data set, so we return the old", "# prefix and the old in_type", "return", "_StepResult", "(", "next_file", "=", "in_prefix", ",", "next_file_type", "=", "required_type", ",", "latex_summary", "=", "latex_file", ",", "description", "=", "flag_hw", ".", "desc", ",", "long_description", "=", "flag_hw", ".", "long_desc", ",", "graph_path", "=", "None", ",", ")" ]
36.649254
[ 0.014925373134328358, 0.06896551724137931, 0, 0.057692307692307696, 0.0625, 0.07317073170731707, 0.07142857142857142, 0.07692307692307693, 0, 0.125, 0.13636363636363635, 0.12, 0.13043478260869565, 0.13043478260869565, 0, 0.06756756756756757, 0.05405405405405406, 0.15384615384615385, 0, 0.09333333333333334, 0.038461538461538464, 0.07894736842105263, 0.05, 0, 0.23076923076923078, 0.09210526315789473, 0.02631578947368421, 0.11764705882352941, 0, 0.2857142857142857, 0.05714285714285714, 0.08333333333333333, 0, 0.07407407407407407, 0.07407407407407407, 0.03571428571428571, 0, 0.025974025974025976, 0.16666666666666666, 0.03636363636363636, 0.05357142857142857, 0.07692307692307693, 0, 0.08695652173913043, 0.25, 0.06896551724137931, 0.05405405405405406, 0.05405405405405406, 0.06451612903225806, 0, 0.030303030303030304, 0.030303030303030304, 0.10526315789473684, 0.06666666666666667, 0.046511627906976744, 0.1111111111111111, 0.045454545454545456, 0.13333333333333333, 0.05128205128205128, 0.3333333333333333, 0, 0.038461538461538464, 0.08, 0.046511627906976744, 0.03571428571428571, 0, 0.07407407407407407, 0.037037037037037035, 0, 0.06060606060606061, 0.03278688524590164, 0.25, 0.044444444444444446, 0.05555555555555555, 0.05714285714285714, 0.23076923076923078, 0, 0.07407407407407407, 0.03225806451612903, 0, 0.15, 0.02564102564102564, 0.02531645569620253, 0.02666666666666667, 0.02631578947368421, 0.06976744186046512, 0.04, 0.0273972602739726, 0.030303030303030304, 0.04, 0.0273972602739726, 0.030303030303030304, 0.07142857142857142, 0.043478260869565216, 0.034482758620689655, 0.1111111111111111, 0.13636363636363635, 0.07142857142857142, 0.043478260869565216, 0.034482758620689655, 0.1111111111111111, 0.13636363636363635, 0.17647058823529413, 0.23076923076923078, 0.03389830508474576, 0, 0.10526315789473684, 0.03076923076923077, 0.06451612903225806, 0, 0.06060606060606061, 0.02631578947368421, 0.038461538461538464, 0.034482758620689655, 0.06382978723404255, 0.046511627906976744, 0.034482758620689655, 0.12, 0.06382978723404255, 0.046511627906976744, 0.034482758620689655, 0.12, 0.06896551724137931, 0, 0.025974025974025976, 0.0625, 0.13043478260869565, 0.10714285714285714, 0.08108108108108109, 0.09090909090909091, 0.09090909090909091, 0.06976744186046512, 0.125, 0.6 ]
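Most of this step's bookkeeping reduces to recovering the HW threshold from each flag file's name and sorting the thresholds numerically. A small sketch of just that parsing; the file names are hypothetical, and the escaped dot is a minor tightening of the original pattern:

import re

def parse_threshold(filename):
    # Strip the 'flag_hw.snp_flag_threshold_' prefix, leaving the threshold.
    return re.sub(r"^flag_hw\.snp_flag_threshold_", "", filename)

names = ["flag_hw.snp_flag_threshold_1e-4", "flag_hw.snp_flag_threshold_0.05"]
thresholds = [parse_threshold(n) for n in names]
print(thresholds)                     # ['1e-4', '0.05']
print(sorted(thresholds, key=float))  # ['1e-4', '0.05'] (numeric order)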
def initialize_from_string(content: str) -> 'CapitanSymbol':
    """
    Creates and initializes a new symbol from a string
    :param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image>
    :return: The initialized symbol
    :rtype: CapitanSymbol
    """

    if content is None or content == "":
        return None

    parts = content.split(":")
    min_x = 100000
    max_x = 0
    min_y = 100000
    max_y = 0

    symbol_name = parts[0]

    sequence = parts[1]
    image_numbers = parts[2].split(',')
    image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))

    stroke = []

    for point_string in sequence.split(";"):
        if point_string == "":
            continue  # Skip the last element, that is due to a trailing ; in each line

        point_x, point_y = point_string.split(",")
        x = float(point_x)
        y = float(point_y)
        stroke.append(SimplePoint2D(x, y))

        max_x = max(max_x, x)
        min_x = min(min_x, x)
        max_y = max(max_y, y)
        min_y = min(min_y, y)

    dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1))
    return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
[ "def", "initialize_from_string", "(", "content", ":", "str", ")", "->", "'CapitanSymbol'", ":", "if", "content", "is", "None", "or", "content", "is", "\"\"", ":", "return", "None", "parts", "=", "content", ".", "split", "(", "\":\"", ")", "min_x", "=", "100000", "max_x", "=", "0", "min_y", "=", "100000", "max_y", "=", "0", "symbol_name", "=", "parts", "[", "0", "]", "sequence", "=", "parts", "[", "1", "]", "image_numbers", "=", "parts", "[", "2", "]", ".", "split", "(", "','", ")", "image_data", "=", "numpy", ".", "asarray", "(", "image_numbers", ",", "numpy", ".", "uint8", ")", ".", "reshape", "(", "(", "30", ",", "30", ")", ")", "stroke", "=", "[", "]", "for", "point_string", "in", "sequence", ".", "split", "(", "\";\"", ")", ":", "if", "point_string", "is", "\"\"", ":", "continue", "# Skip the last element, that is due to a trailing ; in each line", "point_x", ",", "point_y", "=", "point_string", ".", "split", "(", "\",\"", ")", "x", "=", "float", "(", "point_x", ")", "y", "=", "float", "(", "point_y", ")", "stroke", ".", "append", "(", "SimplePoint2D", "(", "x", ",", "y", ")", ")", "max_x", "=", "max", "(", "max_x", ",", "x", ")", "min_x", "=", "min", "(", "min_x", ",", "x", ")", "max_y", "=", "max", "(", "max_y", ",", "y", ")", "min_y", "=", "min", "(", "min_y", ",", "y", ")", "dimensions", "=", "Rectangle", "(", "Point2D", "(", "min_x", ",", "min_y", ")", ",", "int", "(", "max_x", "-", "min_x", "+", "1", ")", ",", "int", "(", "max_y", "-", "min_y", "+", "1", ")", ")", "return", "CapitanSymbol", "(", "content", ",", "stroke", ",", "image_data", ",", "symbol_name", ",", "dimensions", ")" ]
32.829268
[ 0.016666666666666666, 0.18181818181818182, 0.03508771929824561, 0.10619469026548672, 0.07692307692307693, 0.10344827586206896, 0.18181818181818182, 0, 0.045454545454545456, 0.08695652173913043, 0, 0.058823529411764705, 0.09090909090909091, 0.11764705882352941, 0.09090909090909091, 0.11764705882352941, 0, 0.06666666666666667, 0, 0.07407407407407407, 0.046511627906976744, 0.0375, 0, 0.10526315789473684, 0, 0.041666666666666664, 0.058823529411764705, 0.03296703296703297, 0, 0.037037037037037035, 0.06666666666666667, 0.06666666666666667, 0.043478260869565216, 0, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0.06060606060606061, 0, 0.0297029702970297, 0.036585365853658534 ]
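A hedged round trip of the '<label>:<sequence>:<image>' layout the parser expects. All sample data below is fabricated, and the library's SimplePoint2D, Rectangle, and CapitanSymbol classes are deliberately left out; this only demonstrates the string format and the 30 x 30 reshape:

import numpy as np

label = "c-clef"
sequence = "1.0,2.0;3.5,4.0;"              # trailing ';' yields the empty element the parser skips
image = ",".join("0" for _ in range(900))  # 30 * 30 pixel values
content = ":".join([label, sequence, image])

name, seq, img = content.split(":")
points = [tuple(map(float, p.split(","))) for p in seq.split(";") if p != ""]
pixels = np.asarray(img.split(","), np.uint8).reshape((30, 30))
print(name, points, pixels.shape)  # c-clef [(1.0, 2.0), (3.5, 4.0)] (30, 30)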
def walk(self):
    """
    Walks around and returns all objects which need migration
    It does exactly the same as the original method, but adds some
    progress logging.

    :return: objects (with acquisition wrapper) that need migration
    :rtype: generator
    """
    catalog = self.catalog
    query = self.additionalQuery.copy()
    query['portal_type'] = self.src_portal_type
    query['meta_type'] = self.src_meta_type

    if HAS_LINGUA_PLONE and 'Language' in catalog.indexes():
        query['Language'] = 'all'

    brains = catalog(query)
    limit = getattr(self, 'limit', False)
    if limit:
        brains = brains[:limit]
    obj_num_total = len(brains)
    logger.info('{} {} objects will be migrated walking through {}'
                .format(obj_num_total, self.src_portal_type, catalog.id))
    counter = 0
    for brain in brains:
        if counter % 100 == 0:
            logger.info('Progress: {} objects have been migrated out of {}'
                        .format(counter, obj_num_total))
        try:
            obj = brain.getObject()
        except AttributeError:
            LOG.error("Couldn't access %s" % brain.getPath())
            continue

        if self.callBefore is not None and callable(self.callBefore):
            if not self.callBefore(obj, **self.kwargs):
                continue

        try:
            state = obj._p_changed
        except Exception:
            state = 0
        if obj is not None:
            yield obj
            # safe my butt
            if state is None:
                obj._p_deactivate()
        counter += 1
        if obj_num_total == counter:
            logger.info(
                'Progress: {} objects have been migrated out of {}'
                .format(counter, obj_num_total))
[ "def", "walk", "(", "self", ")", ":", "catalog", "=", "self", ".", "catalog", "query", "=", "self", ".", "additionalQuery", ".", "copy", "(", ")", "query", "[", "'portal_type'", "]", "=", "self", ".", "src_portal_type", "query", "[", "'meta_type'", "]", "=", "self", ".", "src_meta_type", "if", "HAS_LINGUA_PLONE", "and", "'Language'", "in", "catalog", ".", "indexes", "(", ")", ":", "query", "[", "'Language'", "]", "=", "'all'", "brains", "=", "catalog", "(", "query", ")", "limit", "=", "getattr", "(", "self", ",", "'limit'", ",", "False", ")", "if", "limit", ":", "brains", "=", "brains", "[", ":", "limit", "]", "obj_num_total", "=", "len", "(", "brains", ")", "logger", ".", "info", "(", "'{} {} objects will be migrated walking through {}'", ".", "format", "(", "obj_num_total", ",", "self", ".", "src_portal_type", ",", "catalog", ".", "id", ")", ")", "counter", "=", "0", "for", "brain", "in", "brains", ":", "if", "counter", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "'Progress: {} objects have been migrated out of {}'", ".", "format", "(", "counter", ",", "obj_num_total", ")", ")", "try", ":", "obj", "=", "brain", ".", "getObject", "(", ")", "except", "AttributeError", ":", "LOG", ".", "error", "(", "\"Couldn't access %s\"", "%", "brain", ".", "getPath", "(", ")", ")", "continue", "if", "self", ".", "callBefore", "is", "not", "None", "and", "callable", "(", "self", ".", "callBefore", ")", ":", "if", "not", "self", ".", "callBefore", "(", "obj", ",", "*", "*", "self", ".", "kwargs", ")", ":", "continue", "try", ":", "state", "=", "obj", ".", "_p_changed", "except", "Exception", ":", "state", "=", "0", "if", "obj", "is", "not", "None", ":", "yield", "obj", "# safe my butt", "if", "state", "is", "None", ":", "obj", ".", "_p_deactivate", "(", ")", "counter", "+=", "1", "if", "obj_num_total", "==", "counter", ":", "logger", ".", "info", "(", "'Progress: {} objects have been migrated out of {}'", ".", "format", "(", "counter", ",", "obj_num_total", ")", ")" ]
36.150943
[ 0.06666666666666667, 0.18181818181818182, 0.030303030303030304, 0.028985507246376812, 0.08, 0, 0.05555555555555555, 0.12, 0.18181818181818182, 0.06666666666666667, 0.046511627906976744, 0.0392156862745098, 0.0425531914893617, 0, 0.03125, 0.05405405405405406, 0, 0.06451612903225806, 0.044444444444444446, 0.11764705882352941, 0.05714285714285714, 0.05714285714285714, 0.04225352112676056, 0.03896103896103896, 0.10526315789473684, 0.07142857142857142, 0.058823529411764705, 0.0379746835443038, 0.05, 0.125, 0.05128205128205128, 0.058823529411764705, 0.03076923076923077, 0.08333333333333333, 0, 0.0273972602739726, 0.03389830508474576, 0.07142857142857142, 0, 0.125, 0.05263157894736842, 0.06896551724137931, 0.08, 0.06451612903225806, 0.08, 0.06666666666666667, 0.06060606060606061, 0.05128205128205128, 0.08333333333333333, 0.05, 0.10714285714285714, 0.028169014084507043, 0.057692307692307696 ]
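Stripped of the Plone catalog specifics, the method is a generator that interleaves yielding with periodic progress logs. A minimal sketch of that pattern over an arbitrary sequence (the logging setup and item source are placeholders):

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("walker")

def walk_items(items, every=100):
    # Yield each item, logging progress every `every` items and at the end.
    total = len(items)
    for counter, item in enumerate(items):
        if counter % every == 0:
            log.info("Progress: %d objects migrated out of %d", counter, total)
        yield item
    log.info("Progress: %d objects migrated out of %d", total, total)

for obj in walk_items(list(range(250))):
    pass  # migrate obj here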
def select_save_engine(engine, paralleled=False):
    '''
    Select the save engine: tushare/ts/Tushare use the free Tushare data API, tdx uses the TDX (TongDaXin) data API
    :param engine: the engine name (str)
    :param paralleled: whether to run in parallel; defaults to False
    :return: sts means save_tushare_py or stdx means save_tdx_py
    '''
    if engine in ['tushare', 'ts', 'Tushare']:
        return sts
    elif engine in ['tdx']:
        if paralleled:
            return stdx_parallelism
        else:
            return stdx
    elif engine in ['gm', 'goldenminer']:
        return sgm
    elif engine in ['jq', 'joinquant']:
        return sjq
    else:
        print('QA Error QASU.main.py call select_save_engine with parameter %s is none of tushare, ts, Tushare, or tdx' % engine)
[ "def", "select_save_engine", "(", "engine", ",", "paralleled", "=", "False", ")", ":", "if", "engine", "in", "[", "'tushare'", ",", "'ts'", ",", "'Tushare'", "]", ":", "return", "sts", "elif", "engine", "in", "[", "'tdx'", "]", ":", "if", "paralleled", ":", "return", "stdx_parallelism", "else", ":", "return", "stdx", "elif", "engine", "in", "[", "'gm'", ",", "'goldenminer'", "]", ":", "return", "sgm", "elif", "engine", "in", "[", "'jq'", ",", "'joinquant'", "]", ":", "return", "sjq", "else", ":", "print", "(", "'QA Error QASU.main.py call select_save_engine with parameter %s is None of thshare, ts, Thshare, or tdx'", ",", "engine", ")" ]
35.25
[ 0.02040816326530612, 0.2857142857142857, 0.039473684210526314, 0.12, 0.07894736842105263, 0.06153846153846154, 0.2857142857142857, 0.043478260869565216, 0.1111111111111111, 0.07407407407407407, 0.09090909090909091, 0.05714285714285714, 0.15384615384615385, 0.08695652173913043, 0.04878048780487805, 0.1111111111111111, 0.05128205128205128, 0.1111111111111111, 0.2222222222222222, 0.023255813953488372 ]
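Usage is a plain alias-to-module dispatch. The sketch below re-creates the same table with string stand-ins for the saver modules (sts, stdx, sgm, and sjq are modules in the real package, not strings):

ALIASES = {
    "tushare": "sts", "ts": "sts", "Tushare": "sts",
    "gm": "sgm", "goldenminer": "sgm",
    "jq": "sjq", "joinquant": "sjq",
}

def pick(engine, paralleled=False):
    # 'tdx' is special-cased so the parallel variant can be chosen.
    if engine == "tdx":
        return "stdx_parallelism" if paralleled else "stdx"
    return ALIASES.get(engine)

print(pick("ts"), pick("tdx", paralleled=True))  # sts stdx_parallelism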
def timezone(client, location, timestamp=None, language=None): """Get time zone for a location on the earth, as well as that location's time offset from UTC. :param location: The latitude/longitude value representing the location to look up. :type location: string, dict, list, or tuple :param timestamp: Timestamp specifies the desired time as seconds since midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to determine whether or not Daylight Savings should be applied. Times before 1970 can be expressed as negative values. Optional. Defaults to ``datetime.utcnow()``. :type timestamp: int or datetime.datetime :param language: The language in which to return results. :type language: string :rtype: dict """ params = { "location": convert.latlng(location), "timestamp": convert.time(timestamp or datetime.utcnow()) } if language: params["language"] = language return client._request( "/maps/api/timezone/json", params)
[ "def", "timezone", "(", "client", ",", "location", ",", "timestamp", "=", "None", ",", "language", "=", "None", ")", ":", "params", "=", "{", "\"location\"", ":", "convert", ".", "latlng", "(", "location", ")", ",", "\"timestamp\"", ":", "convert", ".", "time", "(", "timestamp", "or", "datetime", ".", "utcnow", "(", ")", ")", "}", "if", "language", ":", "params", "[", "\"language\"", "]", "=", "language", "return", "client", ".", "_request", "(", "\"/maps/api/timezone/json\"", ",", "params", ")" ]
34.633333
[ 0.016129032258064516, 0.02631578947368421, 0.08, 0, 0.038461538461538464, 0.125, 0.0625, 0, 0.04, 0.02564102564102564, 0.02702702702702703, 0.02564102564102564, 0.1, 0.06666666666666667, 0, 0.04918032786885246, 0.11538461538461539, 0, 0.1875, 0.2857142857142857, 0, 0.21428571428571427, 0.044444444444444446, 0.03076923076923077, 0.6, 0, 0.125, 0.05405405405405406, 0, 0.04838709677419355 ]
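A hedged usage sketch with the helper above in scope; it assumes the googlemaps-style client this function appears written against, and the API key is a placeholder:

import googlemaps
from datetime import datetime

client = googlemaps.Client(key="YOUR-API-KEY")  # placeholder key
result = timezone(client, (39.603481, -119.682), timestamp=datetime(2012, 3, 14))
# expected shape (abbreviated): {"dstOffset": ..., "rawOffset": ...,
#                                "timeZoneId": "America/Los_Angeles", ...}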
def nest_reducer(x, g):
    """
    Create an ast.For node from a comprehension and another node.

    g is an ast.comprehension.
    x is the code that has to be executed.

    Examples
    --------
    >> [i for i in xrange(2)]

    Becomes

    >> for i in xrange(2):
    >>     ... x code with if clauses ...

    It is a reducer as it can be called recursively for multiple generators. Ex :

    >> [i, j for i in xrange(2) for j in xrange(4)]
    """
    def wrap_in_ifs(node, ifs):
        """
        Wrap comprehension content in all possible if clauses.

        Examples
        --------
        >> [i for i in xrange(2) if i < 3 if 0 < i]

        Becomes

        >> for i in xrange(2):
        >>     if i < 3:
        >>         if 0 < i:
        >>             ... the code from `node` ...

        Note the nested ifs clauses.
        """
        return reduce(lambda n, if_: ast.If(if_, [n], []), ifs, node)
    return ast.For(g.target, g.iter, [wrap_in_ifs(x, g.ifs)], [])
[ "def", "nest_reducer", "(", "x", ",", "g", ")", ":", "def", "wrap_in_ifs", "(", "node", ",", "ifs", ")", ":", "\"\"\"\n Wrap comprehension content in all possibles if clauses.\n\n Examples\n --------\n >> [i for i in xrange(2) if i < 3 if 0 < i]\n\n Becomes\n\n >> for i in xrange(2):\n >> if i < 3:\n >> if 0 < i:\n >> ... the code from `node` ...\n\n Note the nested ifs clauses.\n \"\"\"", "return", "reduce", "(", "lambda", "n", ",", "if_", ":", "ast", ".", "If", "(", "if_", ",", "[", "n", "]", ",", "[", "]", ")", ",", "ifs", ",", "node", ")", "return", "ast", ".", "For", "(", "g", ".", "target", ",", "g", ".", "iter", ",", "[", "wrap_in_ifs", "(", "x", ",", "g", ".", "ifs", ")", "]", ",", "[", "]", ")" ]
27.076923
[ 0.043478260869565216, 0.18181818181818182, 0.029411764705882353, 0, 0.058823529411764705, 0.0425531914893617, 0, 0.125, 0.125, 0.06060606060606061, 0, 0.13333333333333333, 0, 0.06666666666666667, 0.06818181818181818, 0, 0.02702702702702703, 0, 0.05, 0.18181818181818182, 0.05714285714285714, 0.13333333333333333, 0.029850746268656716, 0, 0.1, 0.1, 0.03636363636363636, 0, 0.10526315789473684, 0, 0.058823529411764705, 0.14814814814814814, 0.12903225806451613, 0.07407407407407407, 0, 0.05, 0.13333333333333333, 0.0273972602739726, 0.028985507246376812 ]
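To see the reducer at work, fold it over a comprehension's generators and dump the resulting AST. The sketch below parses a two-generator comprehension and nests the loops innermost-first; whether the caller reverses the generator list is an assumption here, and the bare ast.Expr body stands in for the real accumulation code:

import ast
from functools import reduce

def nest_reducer(x, g):
    def wrap_in_ifs(node, ifs):
        return reduce(lambda n, if_: ast.If(if_, [n], []), ifs, node)
    return ast.For(g.target, g.iter, [wrap_in_ifs(x, g.ifs)], [])

comp = ast.parse("[i + j for i in range(2) for j in range(4) if j > i]",
                 mode="eval").body
body = ast.Expr(comp.elt)  # placeholder for the rewritten loop body
loops = reduce(nest_reducer, reversed(comp.generators), body)
print(ast.dump(loops))  # For(i, ...) wrapping For(j, ...) wrapping If(j > i, ...)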
def ethernet_adapters(self, ethernet_adapters): """ Sets the number of Ethernet adapters for this IOU VM. :param ethernet_adapters: number of adapters """ self._ethernet_adapters.clear() for _ in range(0, ethernet_adapters): self._ethernet_adapters.append(EthernetAdapter(interfaces=4)) log.info('IOU "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(name=self._name, id=self._id, adapters=len(self._ethernet_adapters))) self._adapters = self._ethernet_adapters + self._serial_adapters
[ "def", "ethernet_adapters", "(", "self", ",", "ethernet_adapters", ")", ":", "self", ".", "_ethernet_adapters", ".", "clear", "(", ")", "for", "_", "in", "range", "(", "0", ",", "ethernet_adapters", ")", ":", "self", ".", "_ethernet_adapters", ".", "append", "(", "EthernetAdapter", "(", "interfaces", "=", "4", ")", ")", "log", ".", "info", "(", "'IOU \"{name}\" [{id}]: number of Ethernet adapters changed to {adapters}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ",", "adapters", "=", "len", "(", "self", ".", "_ethernet_adapters", ")", ")", ")", "self", ".", "_adapters", "=", "self", ".", "_ethernet_adapters", "+", "self", ".", "_serial_adapters" ]
48.0625
[ 0.02127659574468085, 0.18181818181818182, 0.03278688524590164, 0, 0.057692307692307696, 0.18181818181818182, 0, 0.05128205128205128, 0.044444444444444446, 0.0273972602739726, 0, 0.035398230088495575, 0.045871559633027525, 0.04411764705882353, 0, 0.027777777777777776 ]
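The setter just rebuilds the Ethernet adapter list to the requested count and refreshes the combined list. A reduced sketch with a stub class in place of the project's EthernetAdapter:

class StubAdapter:
    # Illustrative stand-in for EthernetAdapter(interfaces=4).
    def __init__(self, interfaces=4):
        self.interfaces = interfaces

class VM:
    def __init__(self):
        self._ethernet_adapters = []
        self._serial_adapters = [StubAdapter(interfaces=1)]

    def set_ethernet_adapters(self, count):
        self._ethernet_adapters = [StubAdapter() for _ in range(count)]
        self._adapters = self._ethernet_adapters + self._serial_adapters

vm = VM()
vm.set_ethernet_adapters(2)
print(len(vm._adapters))  # 3: two Ethernet plus one serial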
def visit(self, url=''): """Visit the url, checking for rr errors in the response @param url: URL @return: Visit result """ result = super(CoyoteDriver, self).visit(url) source = self.page_source() return result
[ "def", "visit", "(", "self", ",", "url", "=", "''", ")", ":", "result", "=", "super", "(", "CoyoteDriver", ",", "self", ")", ".", "visit", "(", "url", ")", "source", "=", "self", ".", "page_source", "(", ")", "return", "result" ]
28.888889
[ 0.041666666666666664, 0.03125, 0, 0.08695652173913043, 0.06896551724137931, 0.18181818181818182, 0.03773584905660377, 0.05714285714285714, 0.09523809523809523 ]
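The override is a thin hook: run the base class's visit, capture the page source, and return the base result. A generic sketch of the pattern; the Base class and its methods are hypothetical stand-ins for the real driver:

class Base:
    def visit(self, url=''):
        return "visited %s" % url

    def page_source(self):
        return "<html>...</html>"

class CoyoteLike(Base):
    def visit(self, url=''):
        result = super(CoyoteLike, self).visit(url)
        self.source = self.page_source()  # captured for later inspection
        return result

d = CoyoteLike()
print(d.visit("http://example.com"), d.source)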