Dataset schema (each record below gives the text field, followed by its avg_line_len and score):

- text: string, lengths 75 to 104k
- code_tokens: sequence
- avg_line_len: float64, 7.91 to 980
- score: float64, 0 to 0.18
def list_all_before(self, message_id, limit=None):
    """Return all group messages created before a message.

    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: generator
    """
    return self.list_before(message_id, limit=limit).autopage()
[ "def", "list_all_before", "(", "self", ",", "message_id", ",", "limit", "=", "None", ")", ":", "return", "self", ".", "list_before", "(", "message_id", ",", "limit", "=", "limit", ")", ".", "autopage", "(", ")" ]
avg_line_len: 39.666667
score: 0.005479
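A hedged usage sketch of the autopaging helper above; the `group.messages` accessor and the message ID are assumptions about the surrounding client, not taken from the sample:

# Hypothetical: iterate over every message older than a given one;
# autopage() hides the page-by-page fetching.
for message in group.messages.list_all_before('1234567890', limit=100):
    print(message)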
def reindex_repo_dev_panel(self, project, repository):
    """
    Reindex all of the Jira issues related to this repository, including branches and pull requests.
    This automatically happens as part of an upgrade, and calling this manually should only be
    required if something unforeseen happens and the index becomes out of sync.
    The authenticated user must have REPO_ADMIN permission for the specified repository to call
    this resource.
    :param project:
    :param repository:
    :return:
    """
    url = 'rest/jira-dev/1.0/projects/{projectKey}/repos/{repositorySlug}/reindex'.format(
        projectKey=project, repositorySlug=repository)
    return self.post(url)
[ "def", "reindex_repo_dev_panel", "(", "self", ",", "project", ",", "repository", ")", ":", "url", "=", "'rest/jira-dev/1.0/projects/{projectKey}/repos/{repositorySlug}/reindex'", ".", "format", "(", "projectKey", "=", "project", ",", "repositorySlug", "=", "repository", ")", "return", "self", ".", "post", "(", "url", ")" ]
avg_line_len: 61.692308
score: 0.0086
def intersperse(lis, value):
    """Put value between each existing item in list.

    Parameters
    ----------
    lis : list
        List to intersperse.
    value : object
        Value to insert.

    Returns
    -------
    list
        interspersed list
    """
    out = [value] * (len(lis) * 2 - 1)
    out[0::2] = lis
    return out
[ "def", "intersperse", "(", "lis", ",", "value", ")", ":", "out", "=", "[", "value", "]", "*", "(", "len", "(", "lis", ")", "*", "2", "-", "1", ")", "out", "[", "0", ":", ":", "2", "]", "=", "lis", "return", "out" ]
avg_line_len: 18.055556
score: 0.002924
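A quick sanity check of the slice-assignment trick used above, assuming the function as defined:

# out starts as [0, 0, 0, 0, 0]; out[0::2] = [1, 2, 3] fills the even slots.
assert intersperse([1, 2, 3], 0) == [1, 0, 2, 0, 3]
# A single-element list has no gaps to fill.
assert intersperse(['a'], 0) == ['a']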
def set_weather(self, weather_type):
    """Queue up a set weather command. It will be applied when `tick` or `step` is called next.

    By the next tick, the lighting, skysphere, fog, and relevant particle systems will be updated
    and/or spawned to the given weather. If there is no skysphere or directional light in the
    world, the command may not function properly but will not cause a crash.

    NOTE: Because this command can affect the fog density, any changes made by a
    change_fog_density command before a set_weather command is called will be undone. It is
    recommended to call change_fog_density after calling set_weather.

    Args:
        weather_type (str): The type of weather, which can be 'Rain' or 'Cloudy'. In all
            downloadable worlds, the weather is clear by default. If the given type string is
            not available, the command will not be sent.
    """
    if not SetWeatherCommand.has_type(weather_type.lower()):
        raise HolodeckException("Invalid weather type " + weather_type)

    self._should_write_to_command_buffer = True
    command_to_send = SetWeatherCommand(weather_type.lower())
    self._commands.add_command(command_to_send)
[ "def", "set_weather", "(", "self", ",", "weather_type", ")", ":", "if", "not", "SetWeatherCommand", ".", "has_type", "(", "weather_type", ".", "lower", "(", ")", ")", ":", "raise", "HolodeckException", "(", "\"Invalid weather type \"", "+", "weather_type", ")", "self", ".", "_should_write_to_command_buffer", "=", "True", "command_to_send", "=", "SetWeatherCommand", "(", "weather_type", ".", "lower", "(", ")", ")", "self", ".", "_commands", ".", "add_command", "(", "command_to_send", ")" ]
avg_line_len: 58.190476
score: 0.007246
def check_unused(intersection, duplicates, intersections):
    """Check if a "valid" ``intersection`` is already in ``intersections``.

    This assumes that

    * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``
    * At least one of the intersections in ``intersections`` is classified as
      ``COINCIDENT_UNUSED``.

    Args:
        intersection (.Intersection): An intersection to be added.
        duplicates (List[.Intersection]): List of duplicate intersections.
        intersections (List[.Intersection]): List of "accepted" (i.e.
            non-duplicate) intersections.

    Returns:
        bool: Indicates if the ``intersection`` is a duplicate.
    """
    for other in intersections:
        if (
            other.interior_curve == UNUSED_T
            and intersection.index_first == other.index_first
            and intersection.index_second == other.index_second
        ):
            if intersection.s == 0.0 and other.s == 0.0:
                duplicates.append(intersection)
                return True
            if intersection.t == 0.0 and other.t == 0.0:
                duplicates.append(intersection)
                return True
    return False
[ "def", "check_unused", "(", "intersection", ",", "duplicates", ",", "intersections", ")", ":", "for", "other", "in", "intersections", ":", "if", "(", "other", ".", "interior_curve", "==", "UNUSED_T", "and", "intersection", ".", "index_first", "==", "other", ".", "index_first", "and", "intersection", ".", "index_second", "==", "other", ".", "index_second", ")", ":", "if", "intersection", ".", "s", "==", "0.0", "and", "other", ".", "s", "==", "0.0", ":", "duplicates", ".", "append", "(", "intersection", ")", "return", "True", "if", "intersection", ".", "t", "==", "0.0", "and", "other", ".", "t", "==", "0.0", ":", "duplicates", ".", "append", "(", "intersection", ")", "return", "True", "return", "False" ]
avg_line_len: 35.575758
score: 0.000829
def _split_url_string(param_str):
    """Turn URL string into parameters."""
    if not PY3:
        # If passed unicode with quoted UTF8, Python 2's parse_qs leaves
        # mojibake'd unicode after unquoting, so encode first.
        param_str = b(param_str, 'utf-8')
    parameters = parse_qs(param_str, keep_blank_values=True)
    for k, v in parameters.items():
        if len(v) == 1:
            parameters[k] = unquote(v[0])
        else:
            parameters[k] = sorted([unquote(s) for s in v])
    return parameters
[ "def", "_split_url_string", "(", "param_str", ")", ":", "if", "not", "PY3", ":", "# If passed unicode with quoted UTF8, Python2's parse_qs leaves", "# mojibake'd uniocde after unquoting, so encode first.", "param_str", "=", "b", "(", "param_str", ",", "'utf-8'", ")", "parameters", "=", "parse_qs", "(", "param_str", ",", "keep_blank_values", "=", "True", ")", "for", "k", ",", "v", "in", "parameters", ".", "items", "(", ")", ":", "if", "len", "(", "v", ")", "==", "1", ":", "parameters", "[", "k", "]", "=", "unquote", "(", "v", "[", "0", "]", ")", "else", ":", "parameters", "[", "k", "]", "=", "sorted", "(", "[", "unquote", "(", "s", ")", "for", "s", "in", "v", "]", ")", "return", "parameters" ]
avg_line_len: 43.384615
score: 0.003472
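For reference, the standard-library call at the core of the helper behaves as sketched below (Python 3 only; the PY2 branch above is skipped):

from urllib.parse import parse_qs

# parse_qs always returns a list per key, and keep_blank_values retains empties:
# {'a': ['1'], 'b': ['two words', 'three'], 'empty': ['']}
print(parse_qs('a=1&b=two%20words&b=three&empty=', keep_blank_values=True))
# The helper above then unwraps single-element lists and sorts multi-valued ones.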
def get_settings(self, index=None, name=None, params=None):
    """
    Retrieve settings for one or more (or all) indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg name: The name of the settings that should be included
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default ['open', 'closed'],
        valid choices are: 'open', 'closed', 'none', 'all'
    :arg flat_settings: Return settings in flat format (default: false)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg include_defaults: Whether to return all default settings for each
        of the indices., default False
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    return self.transport.perform_request(
        "GET", _make_path(index, "_settings", name), params=params
    )
[ "def", "get_settings", "(", "self", ",", "index", "=", "None", ",", "name", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "\"GET\"", ",", "_make_path", "(", "index", ",", "\"_settings\"", ",", "name", ")", ",", "params", "=", "params", ")" ]
avg_line_len: 57.6
score: 0.002732
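A hedged usage sketch, assuming this method belongs to the indices client of the elasticsearch-py package its docstring mirrors:

from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a reachable cluster on localhost:9200
# Restrict the response to one setting across two indices.
settings = es.indices.get_settings(index='logs-2019,logs-2020',
                                   name='index.number_of_replicas')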
def _sync(original, processed):
    """
    Add output to data if run successfully.
    For now only macs2 is available, so no need to consider multiple callers.
    """
    for original_sample in original:
        original_sample[0]["peaks_files"] = {}
        for process_sample in processed:
            if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(process_sample[0]):
                for key in ["peaks_files"]:
                    if process_sample[0].get(key):
                        original_sample[0][key] = process_sample[0][key]
    return original
[ "def", "_sync", "(", "original", ",", "processed", ")", ":", "for", "original_sample", "in", "original", ":", "original_sample", "[", "0", "]", "[", "\"peaks_files\"", "]", "=", "{", "}", "for", "process_sample", "in", "processed", ":", "if", "dd", ".", "get_sample_name", "(", "original_sample", "[", "0", "]", ")", "==", "dd", ".", "get_sample_name", "(", "process_sample", "[", "0", "]", ")", ":", "for", "key", "in", "[", "\"peaks_files\"", "]", ":", "if", "process_sample", "[", "0", "]", ".", "get", "(", "key", ")", ":", "original_sample", "[", "0", "]", "[", "key", "]", "=", "process_sample", "[", "0", "]", "[", "key", "]", "return", "original" ]
avg_line_len: 40.571429
score: 0.003442
def list(self, walkTrace=tuple(), case=None, element=None):
    """List section titles."""
    if case == 'sectionmain':
        print(walkTrace, self.title)
[ "def", "list", "(", "self", ",", "walkTrace", "=", "tuple", "(", ")", ",", "case", "=", "None", ",", "element", "=", "None", ")", ":", "if", "case", "==", "'sectionmain'", ":", "print", "(", "walkTrace", ",", "self", ".", "title", ")" ]
avg_line_len: 39.75
score: 0.04321
def doc(self):
    """Docstring of property on target or override specified on PV-object."""
    return self._pv.doc or inspect.getdoc(
        getattr(type(self._target), self._pv.property, None)) or ''
[ "def", "doc", "(", "self", ")", ":", "return", "self", ".", "_pv", ".", "doc", "or", "inspect", ".", "getdoc", "(", "getattr", "(", "type", "(", "self", ".", "_target", ")", ",", "self", ".", "_pv", ".", "property", ",", "None", ")", ")", "or", "''" ]
avg_line_len: 53
score: 0.013953
def _parse_format(self, format):
    """
    Converts the input format to a regular expression, as well as extracting fields.

    Raises an exception if it couldn't compile the generated regex.
    """
    format = format.strip()
    format = re.sub('[ \t]+', ' ', format)

    subpatterns = []

    findquotes = re.compile(r'^\\"')
    findreferreragent = re.compile('Referer|User-Agent')
    findpercent = re.compile('^%.*t$')
    lstripquotes = re.compile(r'^\\"')
    rstripquotes = re.compile(r'\\"$')
    header = re.compile(r'.*%\{([^\}]+)\}i')

    for element in format.split(' '):
        hasquotes = 0
        if findquotes.search(element):
            hasquotes = 1

        if hasquotes:
            element = lstripquotes.sub('', element)
            element = rstripquotes.sub('', element)

        head = header.match(element)
        if head:
            self._names.append(head.groups()[0].lower())
            self._types.append(str)
        else:
            self._names.append(self.alias(element))
            self._types.append(self.types.get(element, [None, str])[1])

        subpattern = '(\S*)'

        if hasquotes:
            if element == '%r' or findreferreragent.search(element):
                subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"'
            else:
                subpattern = r'\"([^\"]*)\"'
        elif findpercent.search(element):
            subpattern = r'(\[[^\]]+\])'
        elif element == '%U':
            subpattern = '(.+?)'

        subpatterns.append(subpattern)

    self._pattern = '^' + ' '.join(subpatterns) + '$'
    try:
        self._regex = re.compile(self._pattern)
    except Exception as e:
        raise ApacheLogParserError(e)
[ "def", "_parse_format", "(", "self", ",", "format", ")", ":", "format", "=", "format", ".", "strip", "(", ")", "format", "=", "re", ".", "sub", "(", "'[ \\t]+'", ",", "' '", ",", "format", ")", "subpatterns", "=", "[", "]", "findquotes", "=", "re", ".", "compile", "(", "r'^\\\\\"'", ")", "findreferreragent", "=", "re", ".", "compile", "(", "'Referer|User-Agent'", ")", "findpercent", "=", "re", ".", "compile", "(", "'^%.*t$'", ")", "lstripquotes", "=", "re", ".", "compile", "(", "r'^\\\\\"'", ")", "rstripquotes", "=", "re", ".", "compile", "(", "r'\\\\\"$'", ")", "header", "=", "re", ".", "compile", "(", "r'.*%\\{([^\\}]+)\\}i'", ")", "for", "element", "in", "format", ".", "split", "(", "' '", ")", ":", "hasquotes", "=", "0", "if", "findquotes", ".", "search", "(", "element", ")", ":", "hasquotes", "=", "1", "if", "hasquotes", ":", "element", "=", "lstripquotes", ".", "sub", "(", "''", ",", "element", ")", "element", "=", "rstripquotes", ".", "sub", "(", "''", ",", "element", ")", "head", "=", "header", ".", "match", "(", "element", ")", "if", "head", ":", "self", ".", "_names", ".", "append", "(", "head", ".", "groups", "(", ")", "[", "0", "]", ".", "lower", "(", ")", ")", "self", ".", "_types", ".", "append", "(", "str", ")", "else", ":", "self", ".", "_names", ".", "append", "(", "self", ".", "alias", "(", "element", ")", ")", "self", ".", "_types", ".", "append", "(", "self", ".", "types", ".", "get", "(", "element", ",", "[", "None", ",", "str", "]", ")", "[", "1", "]", ")", "subpattern", "=", "'(\\S*)'", "if", "hasquotes", ":", "if", "element", "==", "'%r'", "or", "findreferreragent", ".", "search", "(", "element", ")", ":", "subpattern", "=", "r'\\\"([^\"\\\\]*(?:\\\\.[^\"\\\\]*)*)\\\"'", "else", ":", "subpattern", "=", "r'\\\"([^\\\"]*)\\\"'", "elif", "findpercent", ".", "search", "(", "element", ")", ":", "subpattern", "=", "r'(\\[[^\\]]+\\])'", "elif", "element", "==", "'%U'", ":", "subpattern", "=", "'(.+?)'", "subpatterns", ".", "append", "(", "subpattern", ")", "self", ".", "_pattern", "=", "'^'", "+", "' '", ".", "join", "(", "subpatterns", ")", "+", "'$'", "try", ":", "self", ".", "_regex", "=", "re", ".", "compile", "(", "self", ".", "_pattern", ")", "except", "Exception", "as", "e", ":", "raise", "ApacheLogParserError", "(", "e", ")" ]
avg_line_len: 33.086207
score: 0.007591
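For context, a sketch of the kind of format string this method consumes; the `ApacheLogParser` constructor and `parse` call are assumptions about the surrounding class, not shown in the sample:

# Apache "combined" log format: host, logname, user, time, request,
# status, bytes, referer, user agent.
fmt = r'%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"'
parser = ApacheLogParser(fmt)  # hypothetical wrapper that calls _parse_format
entry = parser.parse('127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
                     '"GET /apache_pb.gif HTTP/1.0" 200 2326 '
                     '"http://example.com/start.html" "Mozilla/4.08"')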
def choose_spot_zone(zones, bid, spot_history):
    """
    Returns the zone to put the spot request based on, in order of priority:

       1) zones with prices currently under the bid
       2) zones with the most stable price

    :param list[boto.ec2.zone.Zone] zones:
    :param float bid:
    :param list[boto.ec2.spotpricehistory.SpotPriceHistory] spot_history:

    :rtype: str
    :return: the name of the selected zone

    >>> from collections import namedtuple
    >>> FauxHistory = namedtuple('FauxHistory', ['price', 'availability_zone'])
    >>> ZoneTuple = namedtuple('ZoneTuple', ['name'])
    >>> zones = [ZoneTuple('us-west-2a'), ZoneTuple('us-west-2b')]
    >>> spot_history = [FauxHistory(0.1, 'us-west-2a'), \
                        FauxHistory(0.2, 'us-west-2a'), \
                        FauxHistory(0.3, 'us-west-2b'), \
                        FauxHistory(0.6, 'us-west-2b')]
    >>> choose_spot_zone(zones, 0.15, spot_history)
    'us-west-2a'

    >>> spot_history=[FauxHistory(0.3, 'us-west-2a'), \
                      FauxHistory(0.2, 'us-west-2a'), \
                      FauxHistory(0.1, 'us-west-2b'), \
                      FauxHistory(0.6, 'us-west-2b')]
    >>> choose_spot_zone(zones, 0.15, spot_history)
    'us-west-2b'

    >>> spot_history=[FauxHistory(0.1, 'us-west-2a'), \
                      FauxHistory(0.7, 'us-west-2a'), \
                      FauxHistory(0.1, 'us-west-2b'), \
                      FauxHistory(0.6, 'us-west-2b')]
    >>> choose_spot_zone(zones, 0.15, spot_history)
    'us-west-2b'
    """
    # Create two lists of tuples of form: [(zone.name, std_deviation), ...] one for zones
    # over the bid price and one for zones under bid price. Each is sorted by increasing
    # standard deviation values.
    markets_under_bid, markets_over_bid = [], []
    for zone in zones:
        zone_histories = [zone_history for zone_history in spot_history
                          if zone_history.availability_zone == zone.name]
        if zone_histories:
            price_deviation = std_dev([history.price for history in zone_histories])
            recent_price = zone_histories[0].price
        else:
            price_deviation, recent_price = 0.0, bid
        zone_tuple = ZoneTuple(name=zone.name, price_deviation=price_deviation)
        (markets_over_bid, markets_under_bid)[recent_price < bid].append(zone_tuple)

    return min(markets_under_bid or markets_over_bid,
               key=attrgetter('price_deviation')).name
[ "def", "choose_spot_zone", "(", "zones", ",", "bid", ",", "spot_history", ")", ":", "# Create two lists of tuples of form: [(zone.name, std_deviation), ...] one for zones", "# over the bid price and one for zones under bid price. Each are sorted by increasing", "# standard deviation values.", "markets_under_bid", ",", "markets_over_bid", "=", "[", "]", ",", "[", "]", "for", "zone", "in", "zones", ":", "zone_histories", "=", "[", "zone_history", "for", "zone_history", "in", "spot_history", "if", "zone_history", ".", "availability_zone", "==", "zone", ".", "name", "]", "if", "zone_histories", ":", "price_deviation", "=", "std_dev", "(", "[", "history", ".", "price", "for", "history", "in", "zone_histories", "]", ")", "recent_price", "=", "zone_histories", "[", "0", "]", ".", "price", "else", ":", "price_deviation", ",", "recent_price", "=", "0.0", ",", "bid", "zone_tuple", "=", "ZoneTuple", "(", "name", "=", "zone", ".", "name", ",", "price_deviation", "=", "price_deviation", ")", "(", "markets_over_bid", ",", "markets_under_bid", ")", "[", "recent_price", "<", "bid", "]", ".", "append", "(", "zone_tuple", ")", "return", "min", "(", "markets_under_bid", "or", "markets_over_bid", ",", "key", "=", "attrgetter", "(", "'price_deviation'", ")", ")", ".", "name" ]
avg_line_len: 43.745455
score: 0.002846
def selected_display_item(self) -> typing.Optional[DisplayItem.DisplayItem]:
    """Return the selected display item.

    The selected display item is the display item that has keyboard focus in the data panel
    or a display panel.
    """
    # first check for the [focused] data browser
    display_item = self.focused_display_item
    if not display_item:
        selected_display_panel = self.selected_display_panel
        display_item = selected_display_panel.display_item if selected_display_panel else None
    return display_item
[ "def", "selected_display_item", "(", "self", ")", "->", "typing", ".", "Optional", "[", "DisplayItem", ".", "DisplayItem", "]", ":", "# first check for the [focused] data browser", "display_item", "=", "self", ".", "focused_display_item", "if", "not", "display_item", ":", "selected_display_panel", "=", "self", ".", "selected_display_panel", "display_item", "=", "selected_display_panel", ".", "display_item", "if", "selected_display_panel", "else", "None", "return", "display_item" ]
avg_line_len: 50.636364
score: 0.007055
def filter_products(self, desired_prods):
    """When asked for a product, filter only those on this list."""
    self.filter_prods = True
    self.desired_prods = set(desired_prods)
[ "def", "filter_products", "(", "self", ",", "desired_prods", ")", ":", "self", ".", "filter_prods", "=", "True", "self", ".", "desired_prods", "=", "set", "(", "desired_prods", ")" ]
avg_line_len: 38.2
score: 0.010256
def read_frames(self, copy=True):
    '''Iterate over the data frames from our C3D file handle.

    Parameters
    ----------
    copy : bool
        If False, the reader returns a reference to the same data buffers
        for every frame. The default is True, which causes the reader to
        return a unique data buffer for each frame. Set this to False if you
        consume frames as you iterate over them, or True if you store them
        for later.

    Returns
    -------
    frames : sequence of (frame number, points, analog)
        This method generates a sequence of (frame number, points, analog)
        tuples, one tuple per frame. The first element of each tuple is the
        frame number. The second is a numpy array of parsed, 5D point data
        and the third element of each tuple is a numpy array of analog
        values that were recorded during the frame. (Often the analog data
        are sampled at a higher frequency than the 3D point data, resulting
        in multiple analog frames per frame of point data.)

        The first three columns in the returned point data are the (x, y, z)
        coordinates of the observed motion capture point. The fourth column
        is an estimate of the error for this particular point, and the fifth
        column is the number of cameras that observed the point in question.
        Both the fourth and fifth values are -1 if the point is considered
        to be invalid.
    '''
    scale = abs(self.point_scale)
    is_float = self.point_scale < 0

    point_bytes = [2, 4][is_float]
    point_dtype = [np.int16, np.float32][is_float]
    point_scale = [scale, 1][is_float]
    points = np.zeros((self.point_used, 5), float)

    # TODO: handle ANALOG:BITS parameter here!
    p = self.get('ANALOG:FORMAT')
    analog_unsigned = p and p.string_value.strip().upper() == 'UNSIGNED'
    analog_dtype = np.int16
    analog_bytes = 2
    if is_float:
        analog_dtype = np.float32
        analog_bytes = 4
    elif analog_unsigned:
        analog_dtype = np.uint16
        analog_bytes = 2
    analog = np.array([], float)

    offsets = np.zeros((self.analog_used, 1), int)
    param = self.get('ANALOG:OFFSET')
    if param is not None:
        offsets = param.int16_array[:self.analog_used, None]

    scales = np.ones((self.analog_used, 1), float)
    param = self.get('ANALOG:SCALE')
    if param is not None:
        scales = param.float_array[:self.analog_used, None]

    gen_scale = 1.
    param = self.get('ANALOG:GEN_SCALE')
    if param is not None:
        gen_scale = param.float_value

    self._handle.seek((self.header.data_block - 1) * 512)
    for frame_no in range(self.first_frame(), self.last_frame() + 1):
        n = 4 * self.header.point_count
        raw = np.fromstring(self._handle.read(n * point_bytes),
                            dtype=point_dtype,
                            count=n).reshape((self.point_used, 4))

        points[:, :3] = raw[:, :3] * point_scale

        valid = raw[:, 3] > -1
        points[~valid, 3:5] = -1
        c = raw[valid, 3].astype(np.uint16)

        # fourth value is floating-point (scaled) error estimate
        points[valid, 3] = (c & 0xff).astype(float) * scale

        # fifth value is number of bits set in camera-observation byte
        points[valid, 4] = sum((c & (1 << k)) >> k for k in range(8, 17))

        if self.header.analog_count > 0:
            n = self.header.analog_count
            raw = np.fromstring(self._handle.read(n * analog_bytes),
                                dtype=analog_dtype,
                                count=n).reshape((-1, self.analog_used)).T
            analog = (raw.astype(float) - offsets) * scales * gen_scale

        if copy:
            yield frame_no, points.copy(), analog.copy()
        else:
            yield frame_no, points, analog
[ "def", "read_frames", "(", "self", ",", "copy", "=", "True", ")", ":", "scale", "=", "abs", "(", "self", ".", "point_scale", ")", "is_float", "=", "self", ".", "point_scale", "<", "0", "point_bytes", "=", "[", "2", ",", "4", "]", "[", "is_float", "]", "point_dtype", "=", "[", "np", ".", "int16", ",", "np", ".", "float32", "]", "[", "is_float", "]", "point_scale", "=", "[", "scale", ",", "1", "]", "[", "is_float", "]", "points", "=", "np", ".", "zeros", "(", "(", "self", ".", "point_used", ",", "5", ")", ",", "float", ")", "# TODO: handle ANALOG:BITS parameter here!", "p", "=", "self", ".", "get", "(", "'ANALOG:FORMAT'", ")", "analog_unsigned", "=", "p", "and", "p", ".", "string_value", ".", "strip", "(", ")", ".", "upper", "(", ")", "==", "'UNSIGNED'", "analog_dtype", "=", "np", ".", "int16", "analog_bytes", "=", "2", "if", "is_float", ":", "analog_dtype", "=", "np", ".", "float32", "analog_bytes", "=", "4", "elif", "analog_unsigned", ":", "analog_dtype", "=", "np", ".", "uint16", "analog_bytes", "=", "2", "analog", "=", "np", ".", "array", "(", "[", "]", ",", "float", ")", "offsets", "=", "np", ".", "zeros", "(", "(", "self", ".", "analog_used", ",", "1", ")", ",", "int", ")", "param", "=", "self", ".", "get", "(", "'ANALOG:OFFSET'", ")", "if", "param", "is", "not", "None", ":", "offsets", "=", "param", ".", "int16_array", "[", ":", "self", ".", "analog_used", ",", "None", "]", "scales", "=", "np", ".", "ones", "(", "(", "self", ".", "analog_used", ",", "1", ")", ",", "float", ")", "param", "=", "self", ".", "get", "(", "'ANALOG:SCALE'", ")", "if", "param", "is", "not", "None", ":", "scales", "=", "param", ".", "float_array", "[", ":", "self", ".", "analog_used", ",", "None", "]", "gen_scale", "=", "1.", "param", "=", "self", ".", "get", "(", "'ANALOG:GEN_SCALE'", ")", "if", "param", "is", "not", "None", ":", "gen_scale", "=", "param", ".", "float_value", "self", ".", "_handle", ".", "seek", "(", "(", "self", ".", "header", ".", "data_block", "-", "1", ")", "*", "512", ")", "for", "frame_no", "in", "range", "(", "self", ".", "first_frame", "(", ")", ",", "self", ".", "last_frame", "(", ")", "+", "1", ")", ":", "n", "=", "4", "*", "self", ".", "header", ".", "point_count", "raw", "=", "np", ".", "fromstring", "(", "self", ".", "_handle", ".", "read", "(", "n", "*", "point_bytes", ")", ",", "dtype", "=", "point_dtype", ",", "count", "=", "n", ")", ".", "reshape", "(", "(", "self", ".", "point_used", ",", "4", ")", ")", "points", "[", ":", ",", ":", "3", "]", "=", "raw", "[", ":", ",", ":", "3", "]", "*", "point_scale", "valid", "=", "raw", "[", ":", ",", "3", "]", ">", "-", "1", "points", "[", "~", "valid", ",", "3", ":", "5", "]", "=", "-", "1", "c", "=", "raw", "[", "valid", ",", "3", "]", ".", "astype", "(", "np", ".", "uint16", ")", "# fourth value is floating-point (scaled) error estimate", "points", "[", "valid", ",", "3", "]", "=", "(", "c", "&", "0xff", ")", ".", "astype", "(", "float", ")", "*", "scale", "# fifth value is number of bits set in camera-observation byte", "points", "[", "valid", ",", "4", "]", "=", "sum", "(", "(", "c", "&", "(", "1", "<<", "k", ")", ")", ">>", "k", "for", "k", "in", "range", "(", "8", ",", "17", ")", ")", "if", "self", ".", "header", ".", "analog_count", ">", "0", ":", "n", "=", "self", ".", "header", ".", "analog_count", "raw", "=", "np", ".", "fromstring", "(", "self", ".", "_handle", ".", "read", "(", "n", "*", "analog_bytes", ")", ",", "dtype", "=", "analog_dtype", ",", "count", "=", "n", ")", ".", "reshape", "(", "(", "-", "1", ",", "self", 
".", "analog_used", ")", ")", ".", "T", "analog", "=", "(", "raw", ".", "astype", "(", "float", ")", "-", "offsets", ")", "*", "scales", "*", "gen_scale", "if", "copy", ":", "yield", "frame_no", ",", "points", ".", "copy", "(", ")", ",", "analog", ".", "copy", "(", ")", "else", ":", "yield", "frame_no", ",", "points", ",", "analog" ]
avg_line_len: 42.395833
score: 0.001441
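A minimal consumption sketch, assuming a `Reader` class in a surrounding c3d module owns read_frames; the package name and constructor are assumptions:

import c3d  # assumed package name for the surrounding module

with open('capture.c3d', 'rb') as handle:
    reader = c3d.Reader(handle)  # hypothetical constructor
    for frame_no, points, analog in reader.read_frames(copy=False):
        # points has shape (point_used, 5): x, y, z, error estimate, camera count
        print(frame_no, points.shape, analog.shape)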
def check_api_version(self):
    """
    Self check that the client expects the api version used by the server.

    /status/ is available without authentication so it will not
    interfere with hello.
    """
    url = self.base_url + "/status/"
    juicer.utils.Log.log_debug("[REST:GET:%s]", url)
    _r = requests.get(url, auth=self.auth, headers=self.headers,
                      verify=False)
    if _r.status_code == Constants.PULP_GET_OK:
        # server is up, cool.
        version = juicer.utils.load_json_str(_r.content)['api_version'].strip()
        if version != Constants.EXPECTED_SERVER_VERSION:
            # we done goofed
            raise JuicerPulpError("Client expects %s and got %s -- you should probably update!"
                                  % (Constants.EXPECTED_SERVER_VERSION, version))
    return True
[ "def", "check_api_version", "(", "self", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"/status/\"", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"[REST:GET:%s]\"", ",", "url", ")", "_r", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "self", ".", "auth", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "False", ")", "if", "_r", ".", "status_code", "==", "Constants", ".", "PULP_GET_OK", ":", "# server is up, cool.", "version", "=", "juicer", ".", "utils", ".", "load_json_str", "(", "_r", ".", "content", ")", "[", "'api_version'", "]", ".", "strip", "(", ")", "if", "version", "!=", "Constants", ".", "EXPECTED_SERVER_VERSION", ":", "# we done goofed", "raise", "JuicerPulpError", "(", "\"Client expects %s and got %s -- you should probably update!\"", "%", "(", "Constants", ".", "EXPECTED_SERVER_VERSION", ",", "version", ")", ")", "return", "True" ]
avg_line_len: 48.166667
score: 0.006787
def pixel(self, func: PixelFunc, *args, **kwargs) -> 'Image':
    "Equivalent to `image.px = func(image.px)`."
    self.px = func(self.px, *args, **kwargs)
    return self
[ "def", "pixel", "(", "self", ",", "func", ":", "PixelFunc", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "'Image'", ":", "self", ".", "px", "=", "func", "(", "self", ".", "px", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self" ]
avg_line_len: 44.25
score: 0.022222
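A hedged sketch of the fluent style this one-liner enables; `img` is assumed to be an instance of the surrounding `Image` class:

# Halve the brightness of the underlying pixel tensor and keep chaining.
img = img.pixel(lambda px: px * 0.5)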
def getArguments(names, local_dict=None, global_dict=None):
    """Get the arguments based on the names."""
    call_frame = sys._getframe(2)

    clear_local_dict = False
    if local_dict is None:
        local_dict = call_frame.f_locals
        clear_local_dict = True
    try:
        frame_globals = call_frame.f_globals
        if global_dict is None:
            global_dict = frame_globals

        # If `call_frame` is the top frame of the interpreter we can't clear its
        # `local_dict`, because it is actually the `global_dict`.
        clear_local_dict = clear_local_dict and not frame_globals is local_dict

        arguments = []
        for name in names:
            try:
                a = local_dict[name]
            except KeyError:
                a = global_dict[name]
            arguments.append(numpy.asarray(a))
    finally:
        # If we generated local_dict via an explicit reference to f_locals,
        # clear the dict to prevent creating extra ref counts in the caller's scope
        # See https://github.com/pydata/numexpr/issues/310
        if clear_local_dict:
            local_dict.clear()

    return arguments
[ "def", "getArguments", "(", "names", ",", "local_dict", "=", "None", ",", "global_dict", "=", "None", ")", ":", "call_frame", "=", "sys", ".", "_getframe", "(", "2", ")", "clear_local_dict", "=", "False", "if", "local_dict", "is", "None", ":", "local_dict", "=", "call_frame", ".", "f_locals", "clear_local_dict", "=", "True", "try", ":", "frame_globals", "=", "call_frame", ".", "f_globals", "if", "global_dict", "is", "None", ":", "global_dict", "=", "frame_globals", "# If `call_frame` is the top frame of the interpreter we can't clear its ", "# `local_dict`, because it is actually the `global_dict`.", "clear_local_dict", "=", "clear_local_dict", "and", "not", "frame_globals", "is", "local_dict", "arguments", "=", "[", "]", "for", "name", "in", "names", ":", "try", ":", "a", "=", "local_dict", "[", "name", "]", "except", "KeyError", ":", "a", "=", "global_dict", "[", "name", "]", "arguments", ".", "append", "(", "numpy", ".", "asarray", "(", "a", ")", ")", "finally", ":", "# If we generated local_dict via an explicit reference to f_locals,", "# clear the dict to prevent creating extra ref counts in the caller's scope", "# See https://github.com/pydata/numexpr/issues/310", "if", "clear_local_dict", ":", "local_dict", ".", "clear", "(", ")", "return", "arguments" ]
avg_line_len: 35.25
score: 0.004314
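A self-contained sketch of the frame-introspection trick the helper relies on:

import sys

def caller_locals():
    # _getframe(1) is the immediate caller's frame; getArguments above uses
    # depth 2 because it is reached through one extra wrapper level.
    return sys._getframe(1).f_locals

def demo():
    x = 42
    assert caller_locals()['x'] == 42

demo()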
def map_routes(
    feed: "Feed",
    route_ids: List[str],
    date: Optional[str] = None,
    color_palette: List[str] = cs.COLORS_SET2,
    *,
    include_stops: bool = True,
):
    """
    Return a Folium map showing the given routes and (optionally) their stops.

    Parameters
    ----------
    feed : Feed
    route_ids : list
        IDs of routes in ``feed.routes``
    date : string
        YYYYMMDD date string restricting the output to trips active on the date
    color_palette : list
        Palette to use to color the routes. If more routes than colors, then
        colors will be recycled.
    include_stops : boolean
        If ``True``, then include stops in the map

    Returns
    -------
    folium.Map
        A Folium Map depicting the distinct shapes of the trips on each route.
        If ``include_stops``, then include the stops for each route.

    Notes
    -----
    - Requires Folium
    """
    import folium as fl

    # Get routes slice and convert to dictionary
    routes = (
        feed.routes.loc[lambda x: x["route_id"].isin(route_ids)]
        .fillna("n/a")
        .to_dict(orient="records")
    )

    # Create route colors
    n = len(routes)
    colors = [color_palette[i % len(color_palette)] for i in range(n)]

    # Initialize map
    my_map = fl.Map(tiles="cartodbpositron")

    # Collect route bounding boxes to set map zoom later
    bboxes = []

    # Create a feature group for each route and add it to the map
    for i, route in enumerate(routes):
        collection = feed.route_to_geojson(
            route_id=route["route_id"], date=date, include_stops=include_stops
        )
        group = fl.FeatureGroup(name="Route " + route["route_short_name"])
        color = colors[i]

        for f in collection["features"]:
            prop = f["properties"]

            # Add stop
            if f["geometry"]["type"] == "Point":
                lon, lat = f["geometry"]["coordinates"]
                fl.CircleMarker(
                    location=[lat, lon],
                    radius=8,
                    fill=True,
                    color=color,
                    weight=1,
                    popup=fl.Popup(hp.make_html(prop)),
                ).add_to(group)

            # Add path
            else:
                prop["color"] = color
                path = fl.GeoJson(
                    f,
                    name=route,
                    style_function=lambda x: {"color": x["properties"]["color"]},
                )
                path.add_child(fl.Popup(hp.make_html(prop)))
                path.add_to(group)
                bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))

        group.add_to(my_map)

    fl.LayerControl().add_to(my_map)

    # Fit map to bounds
    bounds = so.unary_union(bboxes).bounds
    bounds2 = [bounds[1::-1], bounds[3:1:-1]]  # Folium expects this ordering
    my_map.fit_bounds(bounds2)

    return my_map
[ "def", "map_routes", "(", "feed", ":", "\"Feed\"", ",", "route_ids", ":", "List", "[", "str", "]", ",", "date", ":", "Optional", "[", "str", "]", "=", "None", ",", "color_palette", ":", "List", "[", "str", "]", "=", "cs", ".", "COLORS_SET2", ",", "*", ",", "include_stops", ":", "bool", "=", "True", ",", ")", ":", "import", "folium", "as", "fl", "# Get routes slice and convert to dictionary", "routes", "=", "(", "feed", ".", "routes", ".", "loc", "[", "lambda", "x", ":", "x", "[", "\"route_id\"", "]", ".", "isin", "(", "route_ids", ")", "]", ".", "fillna", "(", "\"n/a\"", ")", ".", "to_dict", "(", "orient", "=", "\"records\"", ")", ")", "# Create route colors", "n", "=", "len", "(", "routes", ")", "colors", "=", "[", "color_palette", "[", "i", "%", "len", "(", "color_palette", ")", "]", "for", "i", "in", "range", "(", "n", ")", "]", "# Initialize map", "my_map", "=", "fl", ".", "Map", "(", "tiles", "=", "\"cartodbpositron\"", ")", "# Collect route bounding boxes to set map zoom later", "bboxes", "=", "[", "]", "# Create a feature group for each route and add it to the map", "for", "i", ",", "route", "in", "enumerate", "(", "routes", ")", ":", "collection", "=", "feed", ".", "route_to_geojson", "(", "route_id", "=", "route", "[", "\"route_id\"", "]", ",", "date", "=", "date", ",", "include_stops", "=", "include_stops", ")", "group", "=", "fl", ".", "FeatureGroup", "(", "name", "=", "\"Route \"", "+", "route", "[", "\"route_short_name\"", "]", ")", "color", "=", "colors", "[", "i", "]", "for", "f", "in", "collection", "[", "\"features\"", "]", ":", "prop", "=", "f", "[", "\"properties\"", "]", "# Add stop", "if", "f", "[", "\"geometry\"", "]", "[", "\"type\"", "]", "==", "\"Point\"", ":", "lon", ",", "lat", "=", "f", "[", "\"geometry\"", "]", "[", "\"coordinates\"", "]", "fl", ".", "CircleMarker", "(", "location", "=", "[", "lat", ",", "lon", "]", ",", "radius", "=", "8", ",", "fill", "=", "True", ",", "color", "=", "color", ",", "weight", "=", "1", ",", "popup", "=", "fl", ".", "Popup", "(", "hp", ".", "make_html", "(", "prop", ")", ")", ",", ")", ".", "add_to", "(", "group", ")", "# Add path", "else", ":", "prop", "[", "\"color\"", "]", "=", "color", "path", "=", "fl", ".", "GeoJson", "(", "f", ",", "name", "=", "route", ",", "style_function", "=", "lambda", "x", ":", "{", "\"color\"", ":", "x", "[", "\"properties\"", "]", "[", "\"color\"", "]", "}", ",", ")", "path", ".", "add_child", "(", "fl", ".", "Popup", "(", "hp", ".", "make_html", "(", "prop", ")", ")", ")", "path", ".", "add_to", "(", "group", ")", "bboxes", ".", "append", "(", "sg", ".", "box", "(", "*", "sg", ".", "shape", "(", "f", "[", "\"geometry\"", "]", ")", ".", "bounds", ")", ")", "group", ".", "add_to", "(", "my_map", ")", "fl", ".", "LayerControl", "(", ")", ".", "add_to", "(", "my_map", ")", "# Fit map to bounds", "bounds", "=", "so", ".", "unary_union", "(", "bboxes", ")", ".", "bounds", "bounds2", "=", "[", "bounds", "[", "1", ":", ":", "-", "1", "]", ",", "bounds", "[", "3", ":", "1", ":", "-", "1", "]", "]", "# Folium expects this ordering", "my_map", ".", "fit_bounds", "(", "bounds2", ")", "return", "my_map" ]
avg_line_len: 27.903846
score: 0.000333
def parse_keyvalue(parser, event, node):  # pylint: disable=unused-argument
    """Parse CIM/XML KEYVALUE element and return key value based on
    VALUETYPE or TYPE (future) information
    """

    valuetype = _get_required_attribute(node, 'VALUETYPE')
    # TODO 2/16 KS: Type attribute not used. Extend to use. Type was late
    #               extension to spec to allow real types.
    cim_type = _get_attribute(node, 'TYPE')  # pylint: disable=unused-variable

    (next_event, next_node) = six.next(parser)
    if next_event != pulldom.CHARACTERS:
        raise ParseError('Expecting character data')

    value = next_node.nodeValue

    if valuetype == 'string':
        pass
    elif valuetype == 'boolean':
        # CIM-XML says "These values MUST be treated as
        # case-insensitive" (even though the XML definition
        # requires them to be lowercase.)
        p = value.strip().lower()
        if p == 'true':
            value = True
        elif p == 'false':
            value = False
        else:
            raise ParseError('invalid boolean value %r' % p)
    elif valuetype == 'numeric':
        try:
            # XXX: Use TYPE attribute to create named CIM type.
            # if 'TYPE' in attrs(tt):
            #     return tocimobj(attrs(tt)['TYPE'], p.strip())

            # XXX: Would like to use long() here, but that tends to cause
            # trouble when it's written back out as '2L'
            # Redefined from bool to int
            # pylint: disable=redefined-variable-type
            value = int(value.strip(), 0)
        except ValueError:
            raise ParseError('invalid numeric value "%s"' % value)
    else:
        raise ParseError('Invalid VALUETYPE')

    _get_end_event(parser, 'KEYVALUE')

    return value
[ "def", "parse_keyvalue", "(", "parser", ",", "event", ",", "node", ")", ":", "#pylint: disable=unused-argument", "valuetype", "=", "_get_required_attribute", "(", "node", ",", "'VALUETYPE'", ")", "# TODO 2/16 KS: Type attribute not used. Extend to use. Type was late", "# extension to spec to allow real types.", "cim_type", "=", "_get_attribute", "(", "node", ",", "'TYPE'", ")", "# pylint: disable=unused-variable", "(", "next_event", ",", "next_node", ")", "=", "six", ".", "next", "(", "parser", ")", "if", "next_event", "!=", "pulldom", ".", "CHARACTERS", ":", "raise", "ParseError", "(", "'Expecting character data'", ")", "value", "=", "next_node", ".", "nodeValue", "if", "valuetype", "==", "'string'", ":", "pass", "elif", "valuetype", "==", "'boolean'", ":", "# CIM-XML says \"These values MUST be treated as", "# case-insensitive\" (even though the XML definition", "# requires them to be lowercase.)", "p", "=", "value", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "p", "==", "'true'", ":", "value", "=", "True", "elif", "p", "==", "'false'", ":", "value", "=", "False", "else", ":", "raise", "ParseError", "(", "'invalid boolean value %r'", "%", "p", ")", "elif", "valuetype", "==", "'numeric'", ":", "try", ":", "# XXX: Use TYPE attribute to create named CIM type.", "# if 'TYPE' in attrs(tt):", "# return tocimobj(attrs(tt)['TYPE'], p.strip())", "# XXX: Would like to use long() here, but that tends to cause", "# trouble when it's written back out as '2L'", "# pylint: disable=redefined-variable-type", "# Redefined from bool to int", "# pylint: disable=redefined-variable-type", "value", "=", "int", "(", "value", ".", "strip", "(", ")", ",", "0", ")", "except", "ValueError", ":", "raise", "ParseError", "(", "'invalid numeric value \"%s\"'", "%", "value", ")", "else", ":", "raise", "ParseError", "(", "'Invalid VALUETYPE'", ")", "_get_end_event", "(", "parser", ",", "'KEYVALUE'", ")", "return", "value" ]
avg_line_len: 29.508197
score: 0.001613
def check_predict_status(self, view_id, predict_request_id):
    """
    Returns a string indicating the status of the prediction job.

    :param view_id: The data view id returned from data view create
    :param predict_request_id: The id returned from predict
    :return: Status data, also includes results if state is finished
    """
    failure_message = "Get status on predict failed"
    bare_response = self._get_success_json(self._get(
        'v1/data_views/' + str(view_id) + '/predict/' + str(predict_request_id) + '/status',
        None, failure_message=failure_message))
    result = bare_response["data"]
    # result.update({"message": bare_response["message"]})
    return result
[ "def", "check_predict_status", "(", "self", ",", "view_id", ",", "predict_request_id", ")", ":", "failure_message", "=", "\"Get status on predict failed\"", "bare_response", "=", "self", ".", "_get_success_json", "(", "self", ".", "_get", "(", "'v1/data_views/'", "+", "str", "(", "view_id", ")", "+", "'/predict/'", "+", "str", "(", "predict_request_id", ")", "+", "'/status'", ",", "None", ",", "failure_message", "=", "failure_message", ")", ")", "result", "=", "bare_response", "[", "\"data\"", "]", "# result.update({\"message\": bare_response[\"message\"]})", "return", "result" ]
avg_line_len: 38.789474
score: 0.003974
def check_errors(self, response):
    """Check some common errors."""
    # Read content.
    content = response.content

    if 'status' not in content:
        raise self.GeneralError('We expect a status field.')

    # Return the decoded content if status is success.
    if content['status'] == 'success':
        response._content = content
        return

    # Expect messages if some kind of error.
    if 'msgs' not in content:
        raise self.GeneralError('We expect messages in case of error.')

    try:
        messages = list(content['msgs'])
    except Exception:
        raise self.GeneralError("Messages must be a list.")

    # Try to find common errors in the response.
    for msg in messages:
        if 'LVL' in msg and msg['LVL'] == 'ERROR':
            # Check if it is a not found error.
            if msg['ERR_CD'] == 'NOT_FOUND':
                raise self.NotFoundError(msg['INFO'])
            # Duplicated target.
            elif msg['ERR_CD'] == 'TARGET_EXISTS':
                raise self.TargetExistsError(msg['INFO'])
            # Some other error.
            else:
                raise self.DynectError(msg['INFO'])

    raise self.GeneralError("We need at least one error message.")
[ "def", "check_errors", "(", "self", ",", "response", ")", ":", "# Read content.", "content", "=", "response", ".", "content", "if", "'status'", "not", "in", "content", ":", "raise", "self", ".", "GeneralError", "(", "'We expect a status field.'", ")", "# Return the decoded content if status is success.", "if", "content", "[", "'status'", "]", "==", "'success'", ":", "response", ".", "_content", "=", "content", "return", "# Expect messages if some kind of error.", "if", "'msgs'", "not", "in", "content", ":", "raise", "self", ".", "GeneralError", "(", "'We expcet messages in case of error.'", ")", "try", ":", "messages", "=", "list", "(", "content", "[", "'msgs'", "]", ")", "except", ":", "raise", "self", ".", "GeneralError", "(", "\"Messages must be a list.\"", ")", "# Try to found common errors in the response.", "for", "msg", "in", "messages", ":", "if", "'LVL'", "in", "msg", "and", "msg", "[", "'LVL'", "]", "==", "'ERROR'", ":", "# Check if is a not found error.", "if", "msg", "[", "'ERR_CD'", "]", "==", "'NOT_FOUND'", ":", "raise", "self", ".", "NotFoundError", "(", "msg", "[", "'INFO'", "]", ")", "# Duplicated target.", "elif", "msg", "[", "'ERR_CD'", "]", "==", "'TARGET_EXISTS'", ":", "raise", "self", ".", "TargetExistsError", "(", "msg", "[", "'INFO'", "]", ")", "# Some other error.", "else", ":", "raise", "self", ".", "DynectError", "(", "msg", "[", "'INFO'", "]", ")", "raise", "self", ".", "GeneralError", "(", "\"We need at least one error message.\"", ")" ]
avg_line_len: 31.512195
score: 0.002252
def delete(self, features, make_backup=True, **kwargs):
    """
    Delete features from database.

    features : str, iterable, FeatureDB instance
        If FeatureDB, all features will be used. If string, assume it's
        the ID of the feature to remove. Otherwise, assume it's an
        iterable of Feature objects. The classes in gffutils.iterators may
        be helpful in this case.

    make_backup : bool
        If True, and the database you're about to update is a file on
        disk, makes a copy of the existing database and saves it with a
        .bak extension.

    Returns
    -------
    FeatureDB object, with features deleted.
    """
    if make_backup:
        if isinstance(self.dbfn, six.string_types):
            shutil.copy2(self.dbfn, self.dbfn + '.bak')

    c = self.conn.cursor()
    query1 = """
    DELETE FROM features WHERE id = ?
    """
    query2 = """
    DELETE FROM relations WHERE parent = ? OR child = ?
    """
    if isinstance(features, FeatureDB):
        features = features.all_features()
    if isinstance(features, six.string_types):
        features = [features]
    if isinstance(features, Feature):
        features = [features]
    for feature in features:
        if isinstance(feature, six.string_types):
            _id = feature
        else:
            _id = feature.id
        c.execute(query1, (_id,))
        c.execute(query2, (_id, _id))
    self.conn.commit()
    return self
[ "def", "delete", "(", "self", ",", "features", ",", "make_backup", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "make_backup", ":", "if", "isinstance", "(", "self", ".", "dbfn", ",", "six", ".", "string_types", ")", ":", "shutil", ".", "copy2", "(", "self", ".", "dbfn", ",", "self", ".", "dbfn", "+", "'.bak'", ")", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "query1", "=", "\"\"\"\n DELETE FROM features WHERE id = ?\n \"\"\"", "query2", "=", "\"\"\"\n DELETE FROM relations WHERE parent = ? OR child = ?\n \"\"\"", "if", "isinstance", "(", "features", ",", "FeatureDB", ")", ":", "features", "=", "features", ".", "all_features", "(", ")", "if", "isinstance", "(", "features", ",", "six", ".", "string_types", ")", ":", "features", "=", "[", "features", "]", "if", "isinstance", "(", "features", ",", "Feature", ")", ":", "features", "=", "[", "features", "]", "for", "feature", "in", "features", ":", "if", "isinstance", "(", "feature", ",", "six", ".", "string_types", ")", ":", "_id", "=", "feature", "else", ":", "_id", "=", "feature", ".", "id", "c", ".", "execute", "(", "query1", ",", "(", "_id", ",", ")", ")", "c", ".", "execute", "(", "query2", ",", "(", "_id", ",", "_id", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "return", "self" ]
avg_line_len: 34.8
score: 0.001242
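A hedged usage sketch, assuming the gffutils package the docstring references; the database filename and feature IDs are made up for illustration:

import gffutils  # assumed import

db = gffutils.FeatureDB('annotations.db')  # hypothetical existing database file
# Remove two features by ID; a .bak copy of the database file is written first.
db = db.delete(['gene0001', 'gene0002'], make_backup=True)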
def setInverted(self, state):
    """
    Sets whether or not to invert the check state for collapsing.

    :param      state | <bool>
    """
    collapsed = self.isCollapsed()
    self._inverted = state
    if self.isCollapsible():
        self.setCollapsed(collapsed)
[ "def", "setInverted", "(", "self", ",", "state", ")", ":", "collapsed", "=", "self", ".", "isCollapsed", "(", ")", "self", ".", "_inverted", "=", "state", "if", "self", ".", "isCollapsible", "(", ")", ":", "self", ".", "setCollapsed", "(", "collapsed", ")" ]
avg_line_len: 31.1
score: 0.009375
def levels(self, with_mem=True):
    """Return cache levels, optionally including main memory."""
    p = self.first_level
    while p is not None:
        yield p
        # FIXME bad hack to include victim caches, need a more general solution,
        # probably involving recursive tree walking
        if p.victims_to is not None and p.victims_to != p.load_from:
            yield p.victims_to
        if p.store_to is not None and p.store_to != p.load_from and p.store_to != p.victims_to:
            yield p.store_to
        p = p.load_from

    if with_mem:
        yield self.main_memory
[ "def", "levels", "(", "self", ",", "with_mem", "=", "True", ")", ":", "p", "=", "self", ".", "first_level", "while", "p", "is", "not", "None", ":", "yield", "p", "# FIXME bad hack to include victim caches, need a more general solution, probably", "# involving recursive tree walking", "if", "p", ".", "victims_to", "is", "not", "None", "and", "p", ".", "victims_to", "!=", "p", ".", "load_from", ":", "yield", "p", ".", "victims_to", "if", "p", ".", "store_to", "is", "not", "None", "and", "p", ".", "store_to", "!=", "p", ".", "load_from", "and", "p", ".", "store_to", "!=", "p", ".", "victims_to", ":", "yield", "p", ".", "store_to", "p", "=", "p", ".", "load_from", "if", "with_mem", ":", "yield", "self", ".", "main_memory" ]
avg_line_len: 42.133333
score: 0.006192
def set_resolved_url(self, item=None, subtitles=None):
    '''Takes a url or a listitem to be played. Used in conjunction with a
    playable list item with a path that calls back into your addon.

    :param item: A playable list item or url. Pass None to alert XBMC of a
                 failure to resolve the item.

                 .. warning:: When using set_resolved_url you should ensure
                              the initial playable item (which calls back
                              into your addon) doesn't have a trailing
                              slash in the URL. Otherwise it won't work
                              reliably with XBMC's PlayMedia().
    :param subtitles: A URL to a remote subtitles file or a local filename
                      for a subtitles file to be played along with the
                      item.
    '''
    if self._end_of_directory:
        raise Exception('Current XBMC handle has been removed. Either '
                        'set_resolved_url(), end_of_directory(), or '
                        'finish() has already been called.')
    self._end_of_directory = True

    succeeded = True
    if item is None:
        # None item indicates the resolve url failed.
        item = {}
        succeeded = False

    if isinstance(item, basestring):
        # caller is passing a url instead of an item dict
        item = {'path': item}

    item = self._listitemify(item)
    item.set_played(True)
    xbmcplugin.setResolvedUrl(self.handle, succeeded, item.as_xbmc_listitem())

    # call to _add_subtitles must be after setResolvedUrl
    if subtitles:
        self._add_subtitles(subtitles)
    return [item]
[ "def", "set_resolved_url", "(", "self", ",", "item", "=", "None", ",", "subtitles", "=", "None", ")", ":", "if", "self", ".", "_end_of_directory", ":", "raise", "Exception", "(", "'Current XBMC handle has been removed. Either '", "'set_resolved_url(), end_of_directory(), or '", "'finish() has already been called.'", ")", "self", ".", "_end_of_directory", "=", "True", "succeeded", "=", "True", "if", "item", "is", "None", ":", "# None item indicates the resolve url failed.", "item", "=", "{", "}", "succeeded", "=", "False", "if", "isinstance", "(", "item", ",", "basestring", ")", ":", "# caller is passing a url instead of an item dict", "item", "=", "{", "'path'", ":", "item", "}", "item", "=", "self", ".", "_listitemify", "(", "item", ")", "item", ".", "set_played", "(", "True", ")", "xbmcplugin", ".", "setResolvedUrl", "(", "self", ".", "handle", ",", "succeeded", ",", "item", ".", "as_xbmc_listitem", "(", ")", ")", "# call to _add_subtitles must be after setResolvedUrl", "if", "subtitles", ":", "self", ".", "_add_subtitles", "(", "subtitles", ")", "return", "[", "item", "]" ]
avg_line_len: 43.756098
score: 0.001091
def ends_with(self, other):
    '''(<) Ends with a specified parser, and at the end parser hasn't consumed any input.'''
    @Parser
    def ends_with_parser(text, index):
        res = self(text, index)
        if not res.status:
            return res
        end = other(text, res.index)
        if end.status:
            return res
        else:
            return Value.failure(end.index, 'ends with {}'.format(end.expected))
    return ends_with_parser
[ "def", "ends_with", "(", "self", ",", "other", ")", ":", "@", "Parser", "def", "ends_with_parser", "(", "text", ",", "index", ")", ":", "res", "=", "self", "(", "text", ",", "index", ")", "if", "not", "res", ".", "status", ":", "return", "res", "end", "=", "other", "(", "text", ",", "res", ".", "index", ")", "if", "end", ".", "status", ":", "return", "res", "else", ":", "return", "Value", ".", "failure", "(", "end", ".", "index", ",", "'ends with {}'", ".", "format", "(", "end", ".", "expected", ")", ")", "return", "ends_with_parser" ]
avg_line_len: 35.857143
score: 0.007767
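A hedged combinator sketch in the parsec style the docstring hints at; `string` and `parse_partial` are assumptions about the surrounding library, not shown in the sample:

# 'ab' must be followed by 'c', but 'c' itself is not consumed.
parser = string('ab').ends_with(string('c'))
value, rest = parser.parse_partial('abc')  # hypothetical partial-parse helper
assert value == 'ab' and rest == 'c'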
def get_owner(self, default=True):
    """Return (User ID, Group ID) tuple

    :param bool default: Whether to return default if not set.
    :rtype: tuple[int, int]
    """
    uid, gid = self.owner
    if not uid and default:
        uid = os.getuid()
    if not gid and default:
        gid = os.getgid()
    return uid, gid
[ "def", "get_owner", "(", "self", ",", "default", "=", "True", ")", ":", "uid", ",", "gid", "=", "self", ".", "owner", "if", "not", "uid", "and", "default", ":", "uid", "=", "os", ".", "getuid", "(", ")", "if", "not", "gid", "and", "default", ":", "gid", "=", "os", ".", "getgid", "(", ")", "return", "uid", ",", "gid" ]
avg_line_len: 23.8
score: 0.005391
def remove_whitespace(text_string):
    '''
    Removes all whitespace found within text_string and returns new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(text_string.split())
    else:
        raise InputError("none type or string not passed as an argument")
[ "def", "remove_whitespace", "(", "text_string", ")", ":", "if", "text_string", "is", "None", "or", "text_string", "==", "\"\"", ":", "return", "\"\"", "elif", "isinstance", "(", "text_string", ",", "str", ")", ":", "return", "\" \"", ".", "join", "(", "text_string", ".", "split", "(", ")", ")", "else", ":", "raise", "InputError", "(", "\"none type or string not passed as an argument\"", ")" ]
avg_line_len: 29.055556
score: 0.005556
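A quick check of the behavior, which collapses internal runs of whitespace to single spaces rather than deleting every whitespace character:

assert remove_whitespace('  hello   world \t!\n') == 'hello world !'
assert remove_whitespace('') == ''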
def all_solidity_variables_used_as_args(self):
    """
    Return the Solidity variables directly used in a call

    Use of the IR to filter index access
    Used to catch check(msg.sender)
    """
    if self._all_solidity_variables_used_as_args is None:
        self._all_solidity_variables_used_as_args = self._explore_functions(
            lambda x: self._explore_func_nodes(x, self._solidity_variable_in_internal_calls))
    return self._all_solidity_variables_used_as_args
[ "def", "all_solidity_variables_used_as_args", "(", "self", ")", ":", "if", "self", ".", "_all_solidity_variables_used_as_args", "is", "None", ":", "self", ".", "_all_solidity_variables_used_as_args", "=", "self", ".", "_explore_functions", "(", "lambda", "x", ":", "self", ".", "_explore_func_nodes", "(", "x", ",", "self", ".", "_solidity_variable_in_internal_calls", ")", ")", "return", "self", ".", "_all_solidity_variables_used_as_args" ]
avg_line_len: 47.090909
score: 0.007576
def targets(self, module):
    """Find the targets for a given module.

    Returns:
        list: A sequence of all of the targets for the specified module.
    """
    if module not in self.module_targets:
        raise BuildError("Could not find module in targets()", module=module)

    return [self.find(x, module) for x in self.module_targets[module]]
[ "def", "targets", "(", "self", ",", "module", ")", ":", "if", "module", "not", "in", "self", ".", "module_targets", ":", "raise", "BuildError", "(", "\"Could not find module in targets()\"", ",", "module", "=", "module", ")", "return", "[", "self", ".", "find", "(", "x", ",", "module", ")", "for", "x", "in", "self", ".", "module_targets", "[", "module", "]", "]" ]
avg_line_len: 34.181818
score: 0.007772
def workflow(
    graph: BELGraph,
    node: BaseEntity,
    key: Optional[str] = None,
    tag: Optional[str] = None,
    default_score: Optional[float] = None,
    runs: Optional[int] = None,
    minimum_nodes: int = 1,
) -> List['Runner']:
    """Generate candidate mechanisms and run the heat diffusion workflow.

    :param graph: A BEL graph
    :param node: The BEL node that is the focus of this analysis
    :param key: The key in the node data dictionary representing the experimental data.
     Defaults to :data:`pybel_tools.constants.WEIGHT`.
    :param tag: The key for the nodes' data dictionaries where the scores will be put.
     Defaults to 'score'
    :param default_score: The initial score for all nodes. This number can go up or down.
    :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
    :param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
    :return: A list of runners
    """
    subgraph = generate_mechanism(graph, node, key=key)

    if subgraph.number_of_nodes() <= minimum_nodes:
        return []

    runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)
    return list(runners)
[ "def", "workflow", "(", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ",", "key", ":", "Optional", "[", "str", "]", "=", "None", ",", "tag", ":", "Optional", "[", "str", "]", "=", "None", ",", "default_score", ":", "Optional", "[", "float", "]", "=", "None", ",", "runs", ":", "Optional", "[", "int", "]", "=", "None", ",", "minimum_nodes", ":", "int", "=", "1", ",", ")", "->", "List", "[", "'Runner'", "]", ":", "subgraph", "=", "generate_mechanism", "(", "graph", ",", "node", ",", "key", "=", "key", ")", "if", "subgraph", ".", "number_of_nodes", "(", ")", "<=", "minimum_nodes", ":", "return", "[", "]", "runners", "=", "multirun", "(", "subgraph", ",", "node", ",", "key", "=", "key", ",", "tag", "=", "tag", ",", "default_score", "=", "default_score", ",", "runs", "=", "runs", ")", "return", "list", "(", "runners", ")" ]
avg_line_len: 44.071429
score: 0.005551
def _repr_html_(self, **kwargs):
    """Displays the HTML Map in a Jupyter notebook."""
    if self._parent is None:
        self.add_to(Figure())
        out = self._parent._repr_html_(**kwargs)
        self._parent = None
    else:
        out = self._parent._repr_html_(**kwargs)
    return out
[ "def", "_repr_html_", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_parent", "is", "None", ":", "self", ".", "add_to", "(", "Figure", "(", ")", ")", "out", "=", "self", ".", "_parent", ".", "_repr_html_", "(", "*", "*", "kwargs", ")", "self", ".", "_parent", "=", "None", "else", ":", "out", "=", "self", ".", "_parent", ".", "_repr_html_", "(", "*", "*", "kwargs", ")", "return", "out" ]
avg_line_len: 35.666667
score: 0.006079
def locked_context(self, key=None, default=dict):
    """
    Executor context is a shared memory object. All workers share this.
    It needs a lock. It's used like this:

        with executor.context() as context:
            visited = context['visited']
            visited.append(state.cpu.PC)
            context['visited'] = visited
    """
    assert default in (list, dict, set)
    with self._lock:
        if key is None:
            yield self._shared_context
        else:
            sub_context = self._shared_context.get(key, None)
            if sub_context is None:
                sub_context = default()
            yield sub_context
            self._shared_context[key] = sub_context
[ "def", "locked_context", "(", "self", ",", "key", "=", "None", ",", "default", "=", "dict", ")", ":", "assert", "default", "in", "(", "list", ",", "dict", ",", "set", ")", "with", "self", ".", "_lock", ":", "if", "key", "is", "None", ":", "yield", "self", ".", "_shared_context", "else", ":", "sub_context", "=", "self", ".", "_shared_context", ".", "get", "(", "key", ",", "None", ")", "if", "sub_context", "is", "None", ":", "sub_context", "=", "default", "(", ")", "yield", "sub_context", "self", ".", "_shared_context", "[", "key", "]", "=", "sub_context" ]
39.684211
0.002591
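A keyed-access sketch; `executor` and `state` are hypothetical (an instance of the class defining locked_context, and a worker-local state object). Because the yielded sub-context is stored back on exit, in-place mutation under the lock is safe:

    with executor.locked_context('visited', list) as visited:
        visited.append(state.cpu.PC)  # written back to the shared context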
def store_sentry(self, username, sentry_bytes):
        """
        Store sentry bytes under a username

        :param username: username
        :type  username: :class:`str`
        :return: whether the operation succeeded
        :rtype: :class:`bool`
        """
        filepath = self._get_sentry_path(username)
        if filepath:
            try:
                with open(filepath, 'wb') as f:
                    f.write(sentry_bytes)
                return True
            except IOError as e:
                self._LOG.error("store_sentry: %s" % str(e))

        return False
[ "def", "store_sentry", "(", "self", ",", "username", ",", "sentry_bytes", ")", ":", "filepath", "=", "self", ".", "_get_sentry_path", "(", "username", ")", "if", "filepath", ":", "try", ":", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "sentry_bytes", ")", "return", "True", "except", "IOError", "as", "e", ":", "self", ".", "_LOG", ".", "error", "(", "\"store_sentry: %s\"", "%", "str", "(", "e", ")", ")", "return", "False" ]
29.947368
0.003407
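A usage sketch; `backend` is a hypothetical instance of the credential-store class above:

    # Persist Steam Guard sentry data; False means no sentry path was
    # configured for this user, or the file write raised an IOError.
    if not backend.store_sentry('steam_user', sentry_bytes):
        print('sentry not persisted')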
def add_tag(self, tag):
        """
        Add a tag to the tag list.
        """
        if tag not in self._tags:
            self._tags[tag] = dict()
[ "def", "add_tag", "(", "self", ",", "tag", ")", ":", "if", "tag", "not", "in", "self", ".", "_tags", ":", "self", ".", "_tags", "[", "tag", "]", "=", "dict", "(", ")" ]
33.25
0.014706
def iterate(self, src, tgt, update=True, training=True):
        """
        Performs one iteration of the training/validation.

        :param src: batch of examples from the source language
        :param tgt: batch of examples from the target language
        :param update: if True, the optimizer updates the weights
        :param training: if True, runs the optimizer step
        """
        src, src_length = src
        tgt, tgt_length = tgt
        src_length = torch.LongTensor(src_length)
        tgt_length = torch.LongTensor(tgt_length)

        num_toks = {}
        num_toks['tgt'] = int(sum(tgt_length - 1))
        num_toks['src'] = int(sum(src_length))

        if self.cuda:
            src = src.cuda()
            src_length = src_length.cuda()
            tgt = tgt.cuda()

        if self.batch_first:
            output = self.model(src, src_length, tgt[:, :-1])
            tgt_labels = tgt[:, 1:]
            T, B = output.size(1), output.size(0)
        else:
            output = self.model(src, src_length, tgt[:-1])
            tgt_labels = tgt[1:]
            T, B = output.size(0), output.size(1)

        loss = self.criterion(output.view(T * B, -1),
                              tgt_labels.contiguous().view(-1))

        loss_per_batch = loss.item()
        loss /= (B * self.iter_size)

        if training:
            self.fp_optimizer.step(loss, self.optimizer, self.scheduler,
                                   update)

        loss_per_token = loss_per_batch / num_toks['tgt']
        loss_per_sentence = loss_per_batch / B

        return loss_per_token, loss_per_sentence, num_toks
[ "def", "iterate", "(", "self", ",", "src", ",", "tgt", ",", "update", "=", "True", ",", "training", "=", "True", ")", ":", "src", ",", "src_length", "=", "src", "tgt", ",", "tgt_length", "=", "tgt", "src_length", "=", "torch", ".", "LongTensor", "(", "src_length", ")", "tgt_length", "=", "torch", ".", "LongTensor", "(", "tgt_length", ")", "num_toks", "=", "{", "}", "num_toks", "[", "'tgt'", "]", "=", "int", "(", "sum", "(", "tgt_length", "-", "1", ")", ")", "num_toks", "[", "'src'", "]", "=", "int", "(", "sum", "(", "src_length", ")", ")", "if", "self", ".", "cuda", ":", "src", "=", "src", ".", "cuda", "(", ")", "src_length", "=", "src_length", ".", "cuda", "(", ")", "tgt", "=", "tgt", ".", "cuda", "(", ")", "if", "self", ".", "batch_first", ":", "output", "=", "self", ".", "model", "(", "src", ",", "src_length", ",", "tgt", "[", ":", ",", ":", "-", "1", "]", ")", "tgt_labels", "=", "tgt", "[", ":", ",", "1", ":", "]", "T", ",", "B", "=", "output", ".", "size", "(", "1", ")", ",", "output", ".", "size", "(", "0", ")", "else", ":", "output", "=", "self", ".", "model", "(", "src", ",", "src_length", ",", "tgt", "[", ":", "-", "1", "]", ")", "tgt_labels", "=", "tgt", "[", "1", ":", "]", "T", ",", "B", "=", "output", ".", "size", "(", "0", ")", ",", "output", ".", "size", "(", "1", ")", "loss", "=", "self", ".", "criterion", "(", "output", ".", "view", "(", "T", "*", "B", ",", "-", "1", ")", ",", "tgt_labels", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", ")", "loss_per_batch", "=", "loss", ".", "item", "(", ")", "loss", "/=", "(", "B", "*", "self", ".", "iter_size", ")", "if", "training", ":", "self", ".", "fp_optimizer", ".", "step", "(", "loss", ",", "self", ".", "optimizer", ",", "self", ".", "scheduler", ",", "update", ")", "loss_per_token", "=", "loss_per_batch", "/", "num_toks", "[", "'tgt'", "]", "loss_per_sentence", "=", "loss_per_batch", "/", "B", "return", "loss_per_token", ",", "loss_per_sentence", ",", "num_toks" ]
34.347826
0.001231
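A calling sketch; the trainer instance and batch tensors are hypothetical, but the (padded tensor, lengths) pairing mirrors what the function unpacks:

    src = (src_tensor, src_lengths)  # hypothetical padded batch + true lengths
    tgt = (tgt_tensor, tgt_lengths)
    loss_tok, loss_sent, num_toks = trainer.iterate(src, tgt,
                                                    update=False,
                                                    training=False)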
def OnButtonCell(self, event): """Event handler for cell button toggle button""" if self.button_cell_button_id == event.GetId(): if event.IsChecked(): label = self._get_button_label() post_command_event(self, self.ButtonCellMsg, text=label) else: post_command_event(self, self.ButtonCellMsg, text=False) event.Skip()
[ "def", "OnButtonCell", "(", "self", ",", "event", ")", ":", "if", "self", ".", "button_cell_button_id", "==", "event", ".", "GetId", "(", ")", ":", "if", "event", ".", "IsChecked", "(", ")", ":", "label", "=", "self", ".", "_get_button_label", "(", ")", "post_command_event", "(", "self", ",", "self", ".", "ButtonCellMsg", ",", "text", "=", "label", ")", "else", ":", "post_command_event", "(", "self", ",", "self", ".", "ButtonCellMsg", ",", "text", "=", "False", ")", "event", ".", "Skip", "(", ")" ]
36.727273
0.004831
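One plausible way the handler above gets wired up; `parent` and `toolbar` are hypothetical, while `wx.ToggleButton` and `wx.EVT_TOGGLEBUTTON` are standard wxPython:

    import wx

    btn = wx.ToggleButton(parent, id=toolbar.button_cell_button_id,
                          label="Button")
    btn.Bind(wx.EVT_TOGGLEBUTTON, toolbar.OnButtonCell)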
def fsdecode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
    '''
    Decode given path.

    :param path: path will be decoded if using bytes
    :type path: bytes or str
    :param os_name: operating system name, defaults to os.name
    :type os_name: str
    :param fs_encoding: current filesystem encoding, defaults to autodetected
    :type fs_encoding: str
    :param errors: decode error handler; when None, 'strict' is used on
        legacy Python or Windows and 'surrogateescape' otherwise
    :type errors: str or None
    :return: decoded path
    :rtype: str
    '''
    if not isinstance(path, bytes):
        return path
    if not errors:
        use_strict = PY_LEGACY or os_name == 'nt'
        errors = 'strict' if use_strict else 'surrogateescape'
    return path.decode(fs_encoding, errors=errors)
[ "def", "fsdecode", "(", "path", ",", "os_name", "=", "os", ".", "name", ",", "fs_encoding", "=", "FS_ENCODING", ",", "errors", "=", "None", ")", ":", "if", "not", "isinstance", "(", "path", ",", "bytes", ")", ":", "return", "path", "if", "not", "errors", ":", "use_strict", "=", "PY_LEGACY", "or", "os_name", "==", "'nt'", "errors", "=", "'strict'", "if", "use_strict", "else", "'surrogateescape'", "return", "path", ".", "decode", "(", "fs_encoding", ",", "errors", "=", "errors", ")" ]
34.210526
0.001497
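A worked round trip, forcing the keyword arguments so the result does not depend on the host system:

    # str input passes through unchanged; bytes input is decoded.
    assert fsdecode('caf\xe9') == 'caf\xe9'
    assert fsdecode(b'caf\xc3\xa9', os_name='posix',
                    fs_encoding='utf-8') == 'caf\xe9'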
def main(command_line=True, **kwargs):
    """
    NAME
        pmd_magic.py

    DESCRIPTION
        converts PMD (Enkin) format files to magic_measurements format files

    SYNTAX
        pmd_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify  input file, or
        -F FILE: specify output file, default is magic_measurements.txt
        -Fsa: specify er_samples format file for appending, default is new er_samples.txt
        -spc NUM : specify number of characters to designate a  specimen, default = 1
        -loc LOCNAME : specify location/study name
        -A: don't average replicate measurements
        -ncn NCON: specify naming convention
        Sample naming convention:
            [1] XXXXY: where XXXX is an arbitrary length site designation and Y
                is the single character sample designation.  e.g., TG001a is the
                first sample from site TG001.    [default]
            [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
            [5] site name same as sample
            [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
            [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
        -lat: Latitude of site (if no value given assumes 0)
        -lon: Longitude of site (if no value given assumes 0)
        -mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
        NB: all others you will have to customize yourself
            or e-mail ltauxe@ucsd.edu for help.

    INPUT
        PMD format files
    """
    # initialize some stuff
    noave=0
    inst=""
    samp_con,Z='1',""
    missing=1
    demag="N"
    er_location_name="unknown"
    citation='This study'
    args=sys.argv
    meth_code="LP-NO"
    specnum=-1
    MagRecs=[]
    version_num=pmag.get_version()
    Samps=[] # keeps track of sample orientations
    DIspec=[]
    MagFiles=[]
    user=""
    mag_file=""
    dir_path='.'
ErSamps=[]
    SampOuts=[]
    SpecOuts=[]
    SiteOuts=[]
    spec_file = 'er_specimens.txt'
    site_file = 'er_sites.txt'
    site_lat,site_lon = 0,0
    samp_file = 'er_samples.txt'
    meas_file = 'magic_measurements.txt'
    #
    # get command line arguments
    #
    if command_line:
        if '-WD' in sys.argv:
            ind = sys.argv.index('-WD')
            dir_path=sys.argv[ind+1]
        if '-ID' in sys.argv:
            ind = sys.argv.index('-ID')
            input_dir_path = sys.argv[ind+1]
        else:
            input_dir_path = dir_path
        output_dir_path = dir_path
        if "-h" in args:
            print(main.__doc__)
            return False
        if '-F' in args:
            ind=args.index("-F")
            meas_file = args[ind+1]
        if '-Fsa' in args:
            ind = args.index("-Fsa")
            samp_file = args[ind+1]
            #try:
            #    open(samp_file,'r')
            #    ErSamps,file_type=pmag.magic_read(samp_file)
            #    print 'sample information will be appended to ', samp_file
            #except:
            #    print samp_file,' not found: sample information will be stored in new er_samples.txt file'
            #    samp_file = output_dir_path+'/er_samples.txt'
        if '-f' in args:
            ind = args.index("-f")
            mag_file= args[ind+1]
        if "-spc" in args:
            ind = args.index("-spc")
            specnum = int(args[ind+1])
        if "-ncn" in args:
            ind=args.index("-ncn")
            samp_con=sys.argv[ind+1]
        if "-loc" in args:
            ind=args.index("-loc")
            er_location_name=args[ind+1]
        if "-A" in args: noave=1
        if "-mcd" in args:
            ind=args.index("-mcd")
            meth_code=args[ind+1]
        if "-lat" in args:
            ind=args.index("-lat")
            site_lat=args[ind+1]
        if "-lon" in args:
            ind=args.index("-lon")
            site_lon=args[ind+1]
    if not command_line:
        dir_path = kwargs.get('dir_path', '.')
        input_dir_path = kwargs.get('input_dir_path', dir_path)
        output_dir_path = dir_path
        meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
        mag_file = kwargs.get('mag_file')
        spec_file = kwargs.get('spec_file', 'er_specimens.txt')
        samp_file = kwargs.get('samp_file', 'er_samples.txt')
        site_file = kwargs.get('site_file', 'er_sites.txt')
        site_lat = kwargs.get('site_lat', 0)
        site_lon = kwargs.get('site_lon', 0)
        specnum = kwargs.get('specnum', 0)
        samp_con = kwargs.get('samp_con', '1')
        er_location_name = kwargs.get('er_location_name', '')
        noave = kwargs.get('noave', 0) # default (0) means DO average
        meth_code = kwargs.get('meth_code', "LP-NO")
    print(samp_con)
    # format variables
    mag_file = os.path.join(input_dir_path,mag_file)
    meas_file = os.path.join(output_dir_path,meas_file)
    spec_file = os.path.join(output_dir_path,spec_file)
    samp_file = os.path.join(output_dir_path,samp_file)
    site_file = os.path.join(output_dir_path,site_file)
    if specnum!=0:specnum=-specnum
    if "4" in samp_con:
        if "-" not in samp_con:
            print("naming convention option [4] must be in form 4-Z where Z is an integer")
            return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
        else:
            Z=samp_con.split("-")[1]
            samp_con="4"
    if "7" in samp_con:
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
        else:
            Z=samp_con.split("-")[1]
            samp_con="7"
    # parse data
    data=open(mag_file,'r').readlines() # read in data from file
    comment=data[0]
    line=data[1].strip()
    line=line.replace("=","= ") # make finding orientations easier
    rec=line.split() # read in sample orientation, etc.
er_specimen_name=rec[0]
    ErSpecRec,ErSampRec,ErSiteRec={},{},{} # make a sample record
    if specnum!=0:
        er_sample_name=rec[0][:specnum]
    else:
        er_sample_name=rec[0]
    if len(ErSamps)>0: # need to copy existing
        for samp in ErSamps:
            if samp['er_sample_name']==er_sample_name:
                ErSampRec=samp # we'll amend this one
            else:
                SampOuts.append(samp) # keep all the others
    if int(samp_con)<6:
        er_site_name=pmag.parse_site(er_sample_name,samp_con,Z)
    else:
        if 'er_site_name' in list(ErSampRec.keys()):er_site_name=ErSampRec['er_site_name']
        if 'er_location_name' in list(ErSampRec.keys()):er_location_name=ErSampRec['er_location_name']
    az_ind=rec.index('a=')+1
    ErSampRec['er_sample_name']=er_sample_name
    ErSampRec['er_sample_description']=comment
    ErSampRec['sample_azimuth']=rec[az_ind]
    dip_ind=rec.index('b=')+1
    dip=-float(rec[dip_ind])
    ErSampRec['sample_dip']='%7.1f'%(dip)
    strike_ind=rec.index('s=')+1
    ErSampRec['sample_bed_dip_direction']='%7.1f'%(float(rec[strike_ind])+90.)
    bd_ind=rec.index('d=')+1
    ErSampRec['sample_bed_dip']=rec[bd_ind]
    v_ind=rec.index('v=')+1
    vol=rec[v_ind][:-3]
    date=rec[-2]
    time=rec[-1]
    ErSampRec['magic_method_codes']=meth_code
    if 'er_location_name' not in list(ErSampRec.keys()):ErSampRec['er_location_name']=er_location_name
    if 'er_site_name' not in list(ErSampRec.keys()):ErSampRec['er_site_name']=er_site_name
    if 'er_citation_names' not in list(ErSampRec.keys()):ErSampRec['er_citation_names']='This study'
    if 'magic_method_codes' not in list(ErSampRec.keys()):ErSampRec['magic_method_codes']='SO-NO'
    ErSpecRec['er_specimen_name'] = er_specimen_name
    ErSpecRec['er_sample_name'] = er_sample_name
    ErSpecRec['er_site_name'] = er_site_name
    ErSpecRec['er_location_name'] = er_location_name
    ErSpecRec['er_citation_names']='This study'
    ErSiteRec['er_site_name'] = er_site_name
    ErSiteRec['er_location_name'] = er_location_name
    ErSiteRec['er_citation_names']='This study'
    ErSiteRec['site_lat'] = site_lat
    ErSiteRec['site_lon']= site_lon
    SpecOuts.append(ErSpecRec)
    SampOuts.append(ErSampRec)
    SiteOuts.append(ErSiteRec)
    for k in range(3,len(data)): # read in data
        line=data[k]
        rec=line.split()
        if len(rec)>1: # skip blank lines at bottom
            MagRec={}
            MagRec['measurement_description']='Date: '+date+' '+time
            MagRec["er_citation_names"]="This study"
            MagRec['er_location_name']=er_location_name
            MagRec['er_site_name']=er_site_name
            MagRec['er_sample_name']=er_sample_name
            MagRec['magic_software_packages']=version_num
            MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
            MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
            MagRec["measurement_flag"]='g'
            MagRec["measurement_standard"]='u'
            MagRec["measurement_number"]='1'
            MagRec["er_specimen_name"]=er_specimen_name
            if rec[0]=='NRM':
                meas_type="LT-NO"
            elif rec[0][0]=='M' or rec[0][0]=='H':
                meas_type="LT-AF-Z"
            elif rec[0][0]=='T':
                meas_type="LT-T-Z"
            else:
                print("measurement type unknown")
                return False, "measurement type unknown"
            X=[float(rec[1]),float(rec[2]),float(rec[3])]
            Vec=pmag.cart2dir(X)
            MagRec["measurement_magn_moment"]='%10.3e'% (Vec[2]) # Am^2
            MagRec["measurement_magn_volume"]=rec[4] # A/m
            MagRec["measurement_dec"]='%7.1f'%(Vec[0])
            MagRec["measurement_inc"]='%7.1f'%(Vec[1])
            MagRec["treatment_ac_field"]='0'
            if meas_type!='LT-NO':
                treat=float(rec[0][1:])
            else:
                treat=0
            if meas_type=="LT-AF-Z":
                MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
            elif meas_type=="LT-T-Z":
                MagRec["treatment_temp"]='%8.3e' % (treat+273.)
# temp in kelvin
            MagRec['magic_method_codes']=meas_type
            MagRecs.append(MagRec)
    MagOuts=pmag.measurements_methods(MagRecs,noave)
    pmag.magic_write(meas_file,MagOuts,'magic_measurements')
    print("results put in ",meas_file)
    pmag.magic_write(spec_file,SpecOuts,'er_specimens')
    pmag.magic_write(samp_file,SampOuts,'er_samples')
    pmag.magic_write(site_file,SiteOuts,'er_sites')
    return True, meas_file
[ "def", "main", "(", "command_line", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# initialize some stuff", "noave", "=", "0", "inst", "=", "\"\"", "samp_con", ",", "Z", "=", "'1'", ",", "\"\"", "missing", "=", "1", "demag", "=", "\"N\"", "er_location_name", "=", "\"unknown\"", "citation", "=", "'This study'", "args", "=", "sys", ".", "argv", "meth_code", "=", "\"LP-NO\"", "specnum", "=", "-", "1", "MagRecs", "=", "[", "]", "version_num", "=", "pmag", ".", "get_version", "(", ")", "Samps", "=", "[", "]", "# keeps track of sample orientations", "DIspec", "=", "[", "]", "MagFiles", "=", "[", "]", "user", "=", "\"\"", "mag_file", "=", "\"\"", "dir_path", "=", "'.'", "ErSamps", "=", "[", "]", "SampOuts", "=", "[", "]", "samp_file", "=", "'er_samples.txt'", "meas_file", "=", "'magic_measurements.txt'", "#", "# get command line arguments", "#", "if", "command_line", ":", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-ID'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-ID'", ")", "input_dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "else", ":", "input_dir_path", "=", "dir_path", "output_dir_path", "=", "dir_path", "if", "\"-h\"", "in", "args", ":", "print", "(", "main", ".", "__doc__", ")", "return", "False", "if", "'-F'", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-F\"", ")", "meas_file", "=", "args", "[", "ind", "+", "1", "]", "if", "'-Fsa'", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-Fsa\"", ")", "samp_file", "=", "args", "[", "ind", "+", "1", "]", "#try:", "# open(samp_file,'r')", "# ErSamps,file_type=pmag.magic_read(samp_file)", "# print 'sample information will be appended to ', samp_file ", "#except:", "# print samp_file,' not found: sample information will be stored in new er_samples.txt file'", "# samp_file = output_dir_path+'/er_samples.txt'", "if", "'-f'", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-f\"", ")", "mag_file", "=", "args", "[", "ind", "+", "1", "]", "if", "\"-spc\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-spc\"", ")", "specnum", "=", "int", "(", "args", "[", "ind", "+", "1", "]", ")", "if", "\"-ncn\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-ncn\"", ")", "samp_con", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "\"-loc\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-loc\"", ")", "er_location_name", "=", "args", "[", "ind", "+", "1", "]", "if", "\"-A\"", "in", "args", ":", "noave", "=", "1", "if", "\"-mcd\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-mcd\"", ")", "meth_code", "=", "args", "[", "ind", "+", "1", "]", "if", "\"-lat\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-lat\"", ")", "site_lat", "=", "args", "[", "ind", "+", "1", "]", "if", "\"-lon\"", "in", "args", ":", "ind", "=", "args", ".", "index", "(", "\"-lon\"", ")", "site_lon", "=", "args", "[", "ind", "+", "1", "]", "if", "not", "command_line", ":", "dir_path", "=", "kwargs", ".", "get", "(", "'dir_path'", ",", "'.'", ")", "input_dir_path", "=", "kwargs", ".", "get", "(", "'input_dir_path'", ",", "dir_path", ")", "output_dir_path", "=", "dir_path", "meas_file", "=", "kwargs", ".", "get", "(", "'meas_file'", ",", "'magic_measurements.txt'", ")", "mag_file", "=", "kwargs", ".", "get", "(", "'mag_file'", ")", "spec_file", "=", "kwargs", ".", "get", "(", 
"'spec_file'", ",", "'er_specimens.txt'", ")", "samp_file", "=", "kwargs", ".", "get", "(", "'samp_file'", ",", "'er_samples.txt'", ")", "site_file", "=", "kwargs", ".", "get", "(", "'site_file'", ",", "'er_sites.txt'", ")", "site_lat", "=", "kwargs", ".", "get", "(", "'site_lat'", ",", "0", ")", "site_lon", "=", "kwargs", ".", "get", "(", "'site_lon'", ",", "0", ")", "specnum", "=", "kwargs", ".", "get", "(", "'specnum'", ",", "0", ")", "samp_con", "=", "kwargs", ".", "get", "(", "'samp_con'", ",", "'1'", ")", "er_location_name", "=", "kwargs", ".", "get", "(", "'er_location_name'", ",", "''", ")", "noave", "=", "kwargs", ".", "get", "(", "'noave'", ",", "0", ")", "# default (0) means DO average", "meth_code", "=", "kwargs", ".", "get", "(", "'meth_code'", ",", "\"LP-NO\"", ")", "print", "(", "samp_con", ")", "# format variables", "mag_file", "=", "os", ".", "path", ".", "join", "(", "input_dir_path", ",", "mag_file", ")", "meas_file", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "meas_file", ")", "spec_file", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "spec_file", ")", "samp_file", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "samp_file", ")", "site_file", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "site_file", ")", "if", "specnum", "!=", "0", ":", "specnum", "=", "-", "specnum", "if", "\"4\"", "in", "samp_con", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"naming convention option [4] must be in form 4-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [4] must be in form 4-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"4\"", "if", "\"7\"", "in", "samp_con", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [7] must be in form 7-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [7] must be in form 7-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"7\"", "# parse data", "data", "=", "open", "(", "mag_file", ",", "'r'", ")", ".", "readlines", "(", ")", "# read in data from file", "comment", "=", "data", "[", "0", "]", "line", "=", "data", "[", "1", "]", ".", "strip", "(", ")", "line", "=", "line", ".", "replace", "(", "\"=\"", ",", "\"= \"", ")", "# make finding orientations easier", "rec", "=", "line", ".", "split", "(", ")", "# read in sample orientation, etc.", "er_specimen_name", "=", "rec", "[", "0", "]", "ErSpecRec", ",", "ErSampRec", ",", "ErSiteRec", "=", "{", "}", ",", "{", "}", ",", "{", "}", "# make a sample record", "if", "specnum", "!=", "0", ":", "er_sample_name", "=", "rec", "[", "0", "]", "[", ":", "specnum", "]", "else", ":", "er_sample_name", "=", "rec", "[", "0", "]", "if", "len", "(", "ErSamps", ")", ">", "0", ":", "# need to copy existing", "for", "samp", "in", "ErSamps", ":", "if", "samp", "[", "'er_sample_name'", "]", "==", "er_sample_name", ":", "ErSampRec", "=", "samp", "# we'll ammend this one", "else", ":", "SampOuts", ".", "append", "(", "samp", ")", "# keep all the others", "if", "int", "(", "samp_con", ")", "<", "6", ":", "er_site_name", "=", "pmag", ".", "parse_site", "(", "er_sample_name", ",", "samp_con", ",", "Z", ")", "else", ":", "if", "'er_site_name'", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "er_site_name", "=", "ErSampREc", "[", "'er_site_name'", "]", "if", 
"'er_location_name'", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "er_location_name", "=", "ErSampREc", "[", "'er_location_name'", "]", "az_ind", "=", "rec", ".", "index", "(", "'a='", ")", "+", "1", "ErSampRec", "[", "'er_sample_name'", "]", "=", "er_sample_name", "ErSampRec", "[", "'er_sample_description'", "]", "=", "comment", "ErSampRec", "[", "'sample_azimuth'", "]", "=", "rec", "[", "az_ind", "]", "dip_ind", "=", "rec", ".", "index", "(", "'b='", ")", "+", "1", "dip", "=", "-", "float", "(", "rec", "[", "dip_ind", "]", ")", "ErSampRec", "[", "'sample_dip'", "]", "=", "'%7.1f'", "%", "(", "dip", ")", "strike_ind", "=", "rec", ".", "index", "(", "'s='", ")", "+", "1", "ErSampRec", "[", "'sample_bed_dip_direction'", "]", "=", "'%7.1f'", "%", "(", "float", "(", "rec", "[", "strike_ind", "]", ")", "+", "90.", ")", "bd_ind", "=", "rec", ".", "index", "(", "'d='", ")", "+", "1", "ErSampRec", "[", "'sample_bed_dip'", "]", "=", "rec", "[", "bd_ind", "]", "v_ind", "=", "rec", ".", "index", "(", "'v='", ")", "+", "1", "vol", "=", "rec", "[", "v_ind", "]", "[", ":", "-", "3", "]", "date", "=", "rec", "[", "-", "2", "]", "time", "=", "rec", "[", "-", "1", "]", "ErSampRec", "[", "'magic_method_codes'", "]", "=", "meth_code", "if", "'er_location_name'", "not", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "ErSampRec", "[", "'er_location_name'", "]", "=", "er_location_name", "if", "'er_site_name'", "not", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "ErSampRec", "[", "'er_site_name'", "]", "=", "er_site_name", "if", "'er_citation_names'", "not", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "ErSampRec", "[", "'er_citation_names'", "]", "=", "'This study'", "if", "'magic_method_codes'", "not", "in", "list", "(", "ErSampRec", ".", "keys", "(", ")", ")", ":", "ErSampRec", "[", "'magic_method_codes'", "]", "=", "'SO-NO'", "ErSpecRec", "[", "'er_specimen_name'", "]", "=", "er_specimen_name", "ErSpecRec", "[", "'er_sample_name'", "]", "=", "er_sample_name", "ErSpecRec", "[", "'er_site_name'", "]", "=", "er_site_name", "ErSpecRec", "[", "'er_location_name'", "]", "=", "er_location_name", "ErSpecRec", "[", "'er_citation_names'", "]", "=", "'This study'", "ErSiteRec", "[", "'er_site_name'", "]", "=", "er_site_name", "ErSiteRec", "[", "'er_location_name'", "]", "=", "er_location_name", "ErSiteRec", "[", "'er_citation_names'", "]", "=", "'This study'", "ErSiteRec", "[", "'site_lat'", "]", "=", "site_lat", "ErSiteRec", "[", "'site_lon'", "]", "=", "site_lon", "SpecOuts", ".", "append", "(", "ErSpecRec", ")", "SampOuts", ".", "append", "(", "ErSampRec", ")", "SiteOuts", ".", "append", "(", "ErSiteRec", ")", "for", "k", "in", "range", "(", "3", ",", "len", "(", "data", ")", ")", ":", "# read in data", "line", "=", "data", "[", "k", "]", "rec", "=", "line", ".", "split", "(", ")", "if", "len", "(", "rec", ")", ">", "1", ":", "# skip blank lines at bottom ", "MagRec", "=", "{", "}", "MagRec", "[", "'measurement_description'", "]", "=", "'Date: '", "+", "date", "+", "' '", "+", "time", "MagRec", "[", "\"er_citation_names\"", "]", "=", "\"This study\"", "MagRec", "[", "'er_location_name'", "]", "=", "er_location_name", "MagRec", "[", "'er_site_name'", "]", "=", "er_site_name", "MagRec", "[", "'er_sample_name'", "]", "=", "er_sample_name", "MagRec", "[", "'magic_software_packages'", "]", "=", "version_num", "MagRec", "[", "\"treatment_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MagRec", "[", 
"\"measurement_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MagRec", "[", "\"measurement_flag\"", "]", "=", "'g'", "MagRec", "[", "\"measurement_standard\"", "]", "=", "'u'", "MagRec", "[", "\"measurement_number\"", "]", "=", "'1'", "MagRec", "[", "\"er_specimen_name\"", "]", "=", "er_specimen_name", "if", "rec", "[", "0", "]", "==", "'NRM'", ":", "meas_type", "=", "\"LT-NO\"", "elif", "rec", "[", "0", "]", "[", "0", "]", "==", "'M'", "or", "rec", "[", "0", "]", "[", "0", "]", "==", "'H'", ":", "meas_type", "=", "\"LT-AF-Z\"", "elif", "rec", "[", "0", "]", "[", "0", "]", "==", "'T'", ":", "meas_type", "=", "\"LT-T-Z\"", "else", ":", "print", "(", "\"measurement type unknown\"", ")", "return", "False", ",", "\"measurement type unknown\"", "X", "=", "[", "float", "(", "rec", "[", "1", "]", ")", ",", "float", "(", "rec", "[", "2", "]", ")", ",", "float", "(", "rec", "[", "3", "]", ")", "]", "Vec", "=", "pmag", ".", "cart2dir", "(", "X", ")", "MagRec", "[", "\"measurement_magn_moment\"", "]", "=", "'%10.3e'", "%", "(", "Vec", "[", "2", "]", ")", "# Am^2 ", "MagRec", "[", "\"measurement_magn_volume\"", "]", "=", "rec", "[", "4", "]", "# A/m ", "MagRec", "[", "\"measurement_dec\"", "]", "=", "'%7.1f'", "%", "(", "Vec", "[", "0", "]", ")", "MagRec", "[", "\"measurement_inc\"", "]", "=", "'%7.1f'", "%", "(", "Vec", "[", "1", "]", ")", "MagRec", "[", "\"treatment_ac_field\"", "]", "=", "'0'", "if", "meas_type", "!=", "'LT-NO'", ":", "treat", "=", "float", "(", "rec", "[", "0", "]", "[", "1", ":", "]", ")", "else", ":", "treat", "=", "0", "if", "meas_type", "==", "\"LT-AF-Z\"", ":", "MagRec", "[", "\"treatment_ac_field\"", "]", "=", "'%8.3e'", "%", "(", "treat", "*", "1e-3", ")", "# convert from mT to tesla", "elif", "meas_type", "==", "\"LT-T-Z\"", ":", "MagRec", "[", "\"treatment_temp\"", "]", "=", "'%8.3e'", "%", "(", "treat", "+", "273.", ")", "# temp in kelvin", "MagRec", "[", "'magic_method_codes'", "]", "=", "meas_type", "MagRecs", ".", "append", "(", "MagRec", ")", "MagOuts", "=", "pmag", ".", "measurements_methods", "(", "MagRecs", ",", "noave", ")", "pmag", ".", "magic_write", "(", "meas_file", ",", "MagOuts", ",", "'magic_measurements'", ")", "print", "(", "\"results put in \"", ",", "meas_file", ")", "pmag", ".", "magic_write", "(", "samp_file", ",", "SpecOuts", ",", "'er_specimens'", ")", "pmag", ".", "magic_write", "(", "samp_file", ",", "SampOuts", ",", "'er_samples'", ")", "pmag", ".", "magic_write", "(", "samp_file", ",", "SiteOuts", ",", "'er_sites'", ")", "return", "True", ",", "meas_file" ]
38.388889
0.021065
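A hypothetical programmatic call (the command_line=False path), using the kwargs the function reads; the input filename is illustrative only:

    ok, meas_out = main(command_line=False,
                        mag_file='ss0207a.pmd',   # hypothetical PMD file
                        dir_path='.',
                        samp_con='1',
                        er_location_name='my_study')
    # on success ok is True and meas_out is the magic_measurements.txt path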
def up(self):
        """
        Operates on the IMCInterface object: configures the interface into an
        administratively up state and refreshes the contents of
        self.adminstatus.
        :return:
        """
        set_interface_up(self.ifIndex, self.auth, self.url, devip=self.ip)
        self.adminstatus = get_interface_details(self.ifIndex, self.auth, self.url, devip=self.ip)[
            'adminStatusDesc']
[ "def", "up", "(", "self", ")", ":", "set_interface_up", "(", "self", ".", "ifIndex", ",", "self", ".", "auth", ",", "self", ".", "url", ",", "devip", "=", "self", ".", "ip", ")", "self", ".", "adminstatus", "=", "get_interface_details", "(", "self", ".", "ifIndex", ",", "self", ".", "auth", ",", "self", ".", "url", ",", "devip", "=", "self", ".", "ip", ")", "[", "'adminStatusDesc'", "]" ]
50.111111
0.010893
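A usage sketch; `iface` is a hypothetical IMCInterface instance bound to a device IP, and the printed status string depends on what the IMC server reports:

    iface.up()                # push the admin-up change to the device
    print(iface.adminstatus)  # refreshed from get_interface_details()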
def reserve_file(self, relative_path):
        """Reserve an XML file for the slice at <relative_path>.xml.

        - the relative path will be created for you
        - not writing anything to that file is an error
        """
        if os.path.isabs(relative_path):
            raise ValueError('%s must be a relative path' % relative_path)
        dest_path = os.path.join(self.root_dir, '%s.xml' % relative_path)
        if os.path.exists(dest_path):
            raise ValueError('%r must not already exist' % dest_path)
        if dest_path in self.expected_xunit_files:
            raise ValueError('%r already reserved' % dest_path)
        dest_dir = os.path.dirname(dest_path)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        self.expected_xunit_files.append(dest_path)
        return dest_path
[ "def", "reserve_file", "(", "self", ",", "relative_path", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "relative_path", ")", ":", "raise", "ValueError", "(", "'%s must be a relative path'", "%", "relative_path", ")", "dest_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_dir", ",", "'%s.xml'", "%", "relative_path", ")", "if", "os", ".", "path", ".", "exists", "(", "dest_path", ")", ":", "raise", "ValueError", "(", "'%r must not already exist'", "%", "dest_path", ")", "if", "dest_path", "in", "self", ".", "expected_xunit_files", ":", "raise", "ValueError", "(", "'%r already reserved'", "%", "dest_path", ")", "dest_dir", "=", "os", ".", "path", ".", "dirname", "(", "dest_path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dest_dir", ")", ":", "os", ".", "makedirs", "(", "dest_dir", ")", "self", ".", "expected_xunit_files", ".", "append", "(", "dest_path", ")", "return", "dest_path" ]
34.125
0.002375
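A usage sketch; `collector` is a hypothetical instance of the class above with root_dir already set:

    path = collector.reserve_file('unit/test_foo')
    # path == os.path.join(collector.root_dir, 'unit/test_foo.xml'); the
    # 'unit' directory now exists, and per the docstring the run is in
    # error if nothing ever gets written to path.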
def initialize_tracer(self, io_loop=None):
        """
        Initialize Jaeger Tracer based on the passed `jaeger_client.Config`.
        Save it to the `opentracing.tracer` global variable.
        Only the first call to this method has any effect.
        """

        with Config._initialized_lock:
            if Config._initialized:
                logger.warn('Jaeger tracer already initialized, skipping')
                return
            Config._initialized = True

        tracer = self.new_tracer(io_loop)

        self._initialize_global_tracer(tracer=tracer)
        return tracer
[ "def", "initialize_tracer", "(", "self", ",", "io_loop", "=", "None", ")", ":", "with", "Config", ".", "_initialized_lock", ":", "if", "Config", ".", "_initialized", ":", "logger", ".", "warn", "(", "'Jaeger tracer already initialized, skipping'", ")", "return", "Config", ".", "_initialized", "=", "True", "tracer", "=", "self", ".", "new_tracer", "(", "io_loop", ")", "self", ".", "_initialize_global_tracer", "(", "tracer", "=", "tracer", ")", "return", "tracer" ]
33.882353
0.003378
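Typical setup, hedged: the sampler settings are illustrative and the service name is hypothetical, but Config(config=..., service_name=...) is the usual jaeger_client entry point:

    from jaeger_client import Config

    config = Config(
        config={'sampler': {'type': 'const', 'param': 1}},
        service_name='my-service',
    )
    tracer = config.initialize_tracer()
    # any later call logs a warning and returns None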
def apply_M(self, ax, ay): """Linear operator that converts ax, ay to abcd. """ jac = numpy.array( [[self.dx.dot(ax), self.dy.dot(ax)], [self.dx.dot(ay), self.dy.dot(ay)]] ) # jacs and J are of shape (2, 2, k). M must be of the same shape and # contain the result of the k 2x2 dot products. Perhaps there's a # dot() for this. M = numpy.einsum("ijl,jkl->ikl", jac, self.J) # M = numpy.array([ # [ # jac[0][0]*self.J[0][0] + jac[0][1]*self.J[1][0], # jac[0][0]*self.J[0][1] + jac[0][1]*self.J[1][1], # ], # [ # jac[1][0]*self.J[0][0] + jac[1][1]*self.J[1][0], # jac[1][0]*self.J[0][1] + jac[1][1]*self.J[1][1], # ], # ]) # One could use # # M = numpy.moveaxis(M, -1, 0) # _, sigma, _ = numpy.linalg.svd(M) # # but computing the singular values explicitly via # <https://scicomp.stackexchange.com/a/14103/3980> is faster and more # explicit. a = (M[0, 0] + M[1, 1]) / 2 b = (M[0, 0] - M[1, 1]) / 2 c = (M[1, 0] + M[0, 1]) / 2 d = (M[1, 0] - M[0, 1]) / 2 return a, b, c, d
[ "def", "apply_M", "(", "self", ",", "ax", ",", "ay", ")", ":", "jac", "=", "numpy", ".", "array", "(", "[", "[", "self", ".", "dx", ".", "dot", "(", "ax", ")", ",", "self", ".", "dy", ".", "dot", "(", "ax", ")", "]", ",", "[", "self", ".", "dx", ".", "dot", "(", "ay", ")", ",", "self", ".", "dy", ".", "dot", "(", "ay", ")", "]", "]", ")", "# jacs and J are of shape (2, 2, k). M must be of the same shape and", "# contain the result of the k 2x2 dot products. Perhaps there's a", "# dot() for this.", "M", "=", "numpy", ".", "einsum", "(", "\"ijl,jkl->ikl\"", ",", "jac", ",", "self", ".", "J", ")", "# M = numpy.array([", "# [", "# jac[0][0]*self.J[0][0] + jac[0][1]*self.J[1][0],", "# jac[0][0]*self.J[0][1] + jac[0][1]*self.J[1][1],", "# ],", "# [", "# jac[1][0]*self.J[0][0] + jac[1][1]*self.J[1][0],", "# jac[1][0]*self.J[0][1] + jac[1][1]*self.J[1][1],", "# ],", "# ])", "# One could use", "#", "# M = numpy.moveaxis(M, -1, 0)", "# _, sigma, _ = numpy.linalg.svd(M)", "#", "# but computing the singular values explicitly via", "# <https://scicomp.stackexchange.com/a/14103/3980> is faster and more", "# explicit.", "a", "=", "(", "M", "[", "0", ",", "0", "]", "+", "M", "[", "1", ",", "1", "]", ")", "/", "2", "b", "=", "(", "M", "[", "0", ",", "0", "]", "-", "M", "[", "1", ",", "1", "]", ")", "/", "2", "c", "=", "(", "M", "[", "1", ",", "0", "]", "+", "M", "[", "0", ",", "1", "]", ")", "/", "2", "d", "=", "(", "M", "[", "1", ",", "0", "]", "-", "M", "[", "0", ",", "1", "]", ")", "/", "2", "return", "a", ",", "b", ",", "c", ",", "d" ]
34.916667
0.002322
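The point of returning (a, b, c, d) is the closed-form 2x2 SVD the comments allude to: the singular values are sqrt(a^2 + d^2) plus or minus sqrt(b^2 + c^2). A sketch of how a caller might recover them; `mesh`, `ax`, and `ay` are hypothetical inputs:

    import numpy

    a, b, c, d = mesh.apply_M(ax, ay)
    sigma_max = numpy.hypot(a, d) + numpy.hypot(b, c)
    sigma_min = numpy.abs(numpy.hypot(a, d) - numpy.hypot(b, c))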
def main_crop(): """This function does the real work. It is called by main() in pdfCropMargins.py, which just handles catching exceptions and cleaning up.""" ## ## Process some of the command-line arguments. ## if args.verbose: print("\nProcessing the PDF with pdfCropMargins (version", __version__+")...") print("System type:", ex.system_os) if len(args.pdf_input_doc) > 1: print("\nError in pdfCropMargins: Only one input PDF document is allowed." "\nFound more than one on the command line:", file=sys.stderr) for f in args.pdf_input_doc: print(" ", f, file=sys.stderr) ex.cleanup_and_exit(1) input_doc_fname = ex.glob_if_windows_os(args.pdf_input_doc[0], exact_num_args=1)[0] if not input_doc_fname.endswith((".pdf",".PDF")): print("\nWarning in pdfCropMargins: The file extension is neither '.pdf'" "\nnor '.PDF'; continuing anyway.\n", file=sys.stderr) if args.verbose: print("\nThe input document's filename is:\n ", input_doc_fname) if not os.path.isfile(input_doc_fname): print("\nError in pdfCropMargins: The specified input file\n " + input_doc_fname + "\nis not a file or does not exist.", file=sys.stderr) ex.cleanup_and_exit(1) if not args.outfile: if args.verbose: print("\nUsing the default-generated output filename.") output_doc_fname = generate_default_filename(input_doc_fname) else: output_doc_fname = ex.glob_if_windows_os(args.outfile[0], exact_num_args=1)[0] if args.verbose: print("\nThe output document's filename will be:\n ", output_doc_fname) if os.path.lexists(output_doc_fname) and args.noclobber: print("\nOption '--noclobber' is set, refusing to overwrite an existing" "\nfile with filename:\n ", output_doc_fname, file=sys.stderr) ex.cleanup_and_exit(1) if os.path.lexists(output_doc_fname) and ex.samefile(input_doc_fname, output_doc_fname): print("\nError in pdfCropMargins: The input file is the same as" "\nthe output file.\n", file=sys.stderr) ex.cleanup_and_exit(1) if args.gsBbox and len(args.fullPageBox) > 1: print("\nWarning: only one --fullPageBox value can be used with the -gs option.", "\nIgnoring all but the first one.", file=sys.stderr) args.fullPageBox = [args.fullPageBox[0]] elif args.gsBbox and not args.fullPageBox: args.fullPageBox = ["c"] # gs default elif not args.fullPageBox: args.fullPageBox = ["m", "c"] # usual default if args.verbose: print("\nFor the full page size, using values from the PDF box" "\nspecified by the intersection of these boxes:", args.fullPageBox) if args.absolutePreCrop: args.absolutePreCrop *= 4 # expand to 4 offsets # See if all four offsets are explicitly set and use those if so. if args.absolutePreCrop4: args.absolutePreCrop = args.absolutePreCrop4 if args.verbose: print("\nThe absolute pre-crops to be applied to each margin, in units of bp," " are:\n ", args.absolutePreCrop) if args.percentRetain: args.percentRetain *= 4 # expand to 4 percents # See if all four percents are explicitly set and use those if so. if args.percentRetain4: args.percentRetain = args.percentRetain4 if args.verbose: print("\nThe percentages of margins to retain are:\n ", args.percentRetain) if args.absoluteOffset: args.absoluteOffset *= 4 # expand to 4 offsets # See if all four offsets are explicitly set and use those if so. if args.absoluteOffset4: args.absoluteOffset = args.absoluteOffset4 if args.verbose: print("\nThe absolute offsets to be applied to each margin, in units of bp," " are:\n ", args.absoluteOffset) # Parse the page ratio into a float if user chose that option. 
if args.setPageRatios: ratio = args.setPageRatios[0].split(":") if len(ratio) > 2: print("\nError in pdfCropMargins: Bad format in aspect ratio command line" " argument.\nToo many colons.") ex.cleanup_and_exit(1) try: if len(ratio) == 2: args.setPageRatios[0] = float(ratio[0])/float(ratio[1]) else: args.setPageRatios[0] = float(ratio[0]) except ValueError: print("\nError in pdfCropMargins: Bad format in aspect ratio command line" " argument.\nCannot convert to a float.") ex.cleanup_and_exit(1) # Set executable paths to non-default locations if set. if args.pdftoppmPath: ex.set_pdftoppm_executable_to_string(args.pdftoppmPath) if args.ghostscriptPath: ex.set_gs_executable_to_string(args.ghostscriptPath) # If the option settings require pdftoppm, make sure we have a running # version. If '--gsBbox' isn't chosen then assume that PDF pages are to be # explicitly rendered. In that case we either need pdftoppm or gs to do the # rendering. gs_render_fallback_set = False # Set True if we switch to gs option as a fallback. if not args.gsBbox and not args.gsRender: found_pdftoppm = ex.init_and_test_pdftoppm_executable( prefer_local=args.pdftoppmLocal) if args.verbose: print("\nFound pdftoppm program at:", found_pdftoppm) if not found_pdftoppm: args.gsRender = True gs_render_fallback_set = True if args.verbose: print("\nNo pdftoppm executable found; using Ghostscript for rendering.") # If any options require Ghostscript, make sure it it installed. if args.gsBbox or args.gsFix or args.gsRender: found_gs = ex.init_and_test_gs_executable() if args.verbose: print("\nFound Ghostscript program at:", found_gs) if args.gsBbox and not found_gs: print("\nError in pdfCropMargins: The '--gsBbox' option was specified but" "\nthe Ghostscript executable could not be located. Is it" "\ninstalled and in the PATH for command execution?\n", file=sys.stderr) ex.cleanup_and_exit(1) if args.gsFix and not found_gs: print("\nError in pdfCropMargins: The '--gsFix' option was specified but" "\nthe Ghostscript executable could not be located. Is it" "\ninstalled and in the PATH for command execution?\n", file=sys.stderr) ex.cleanup_and_exit(1) if args.gsRender and not found_gs: if gs_render_fallback_set: print("\nError in pdfCropMargins: Neither Ghostscript nor pdftoppm" "\nwas found in the PATH for command execution. At least one is" "\nrequired.\n", file=sys.stderr) else: print("\nError in pdfCropMargins: The '--gsRender' option was specified but" "\nthe Ghostscript executable could not be located. Is it" "\ninstalled and in the PATH for command execution?\n", file=sys.stderr) ex.cleanup_and_exit(1) # Give a warning message if incompatible option combinations have been selected. if args.gsBbox and args.threshold: print("\nWarning in pdfCropMargins: The '--threshold' option is ignored" "\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr) if args.gsBbox and args.numBlurs: print("\nWarning in pdfCropMargins: The '--numBlurs' option is ignored" "\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr) if args.gsBbox and args.numSmooths: print("\nWarning in pdfCropMargins: The '--numSmooths' option is ignored" "\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr) ## ## Open the input document in a PdfFileReader object. Due to an apparent bug ## in pyPdf we open two PdfFileReader objects for the file. The time required should ## still be small relative to finding the bounding boxes of pages. 
The bug is ## that writing a PdfFileWriter tends to hang on certain files if 1) pages from ## the same PdfFileReader are shared between two PdfFileWriter objects, or 2) ## the PdfFileWriter is written, the pages are modified, and there is an attempt ## to write the same PdfFileWriter to a different file. ## if args.gsFix: if args.verbose: print("\nAttempting to fix the PDF input file before reading it...") fixed_input_doc_fname = ex.fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname) else: fixed_input_doc_fname = input_doc_fname # Open the input file object. # TODO: Need try except since might fail for permissions. fixed_input_doc_file_object = open(fixed_input_doc_fname, "rb") try: input_doc = PdfFileReader(fixed_input_doc_file_object) tmp_input_doc = PdfFileReader(fixed_input_doc_file_object) except (KeyboardInterrupt, EOFError): raise except: # Can raise various exceptions, just catch the rest here. print("\nError in pdfCropMargins: The pyPdf module failed in an attempt" "\nto read the input file. Is the file a PDF file? If so then it" "\nmay be corrupted. If you have Ghostscript, try the '--gsFix'" "\noption (assuming you are not using it already). That option can" "\nalso convert some PostScript files to a readable format.", file=sys.stderr) ex.cleanup_and_exit(1) ## ## See if the document needs to be decrypted. ## if args.password: try: input_doc.decrypt(args.password) tmp_input_doc.decrypt(args.password) except KeyError: print("\nDecrypting with the password from the '--password' option" "\nfailed.", file=sys.stderr) ex.cleanup_and_exit(1) else: # try decrypting with an empty password try: input_doc.decrypt("") tmp_input_doc.decrypt("") except KeyError: pass # document apparently wasn't encrypted with an empty password ## ## Print out some data and metadata in verbose mode. ## if args.verbose: print("\nThe input document has %s pages." % input_doc.getNumPages()) try: # This is needed because the call sometimes just raises an error. metadata_info = input_doc.getDocumentInfo() except: print("\nWarning: Document metadata could not be read.", file=sys.stderr) metadata_info = None if args.verbose and not metadata_info: print("\nNo readable metadata in the document.") elif args.verbose: try: print("\nThe document's metadata, if set:\n") print(" The Author attribute set in the input document is:\n %s" % (metadata_info.author)) print(" The Creator attribute set in the input document is:\n %s" % (metadata_info.creator)) print(" The Producer attribute set in the input document is:\n %s" % (metadata_info.producer)) print(" The Subject attribute set in the input document is:\n %s" % (metadata_info.subject)) print(" The Title attribute set in the input document is:\n %s" % (metadata_info.title)) # Some metadata cannot be decoded or encoded, at least on Windows. Could # print from a function instead to write all the lines which can be written. except (UnicodeDecodeError, UnicodeEncodeError): print("\nWarning: Could not write all the document's metadata to the screen." "\nGot a UnicodeEncodeError or a UnicodeDecodeError.") ## ## Now compute the set containing the pyPdf page number of all the pages ## which the user has selected for cropping from the command line. Most ## calculations are still carried-out for all the pages in the document. ## (There are a few optimizations for expensive operations like finding ## bounding boxes; the rest is negligible). This keeps the correspondence ## between page numbers and the positions of boxes in the box lists. 
The ## function apply_crop_list then just ignores the cropping information for any ## pages which were not selected. ## all_page_nums = set(range(0, input_doc.getNumPages())) page_nums_to_crop = set() # Note that this set holds page num MINUS ONE, start at 0. if args.pages: # Parse any page range specifier argument. for page_num_or_range in args.pages.split(","): split_range = page_num_or_range.split("-") try: if len(split_range) == 1: # Note pyPdf page nums start at 0, not 1 like usual PDF pages, # subtract 1. page_nums_to_crop.add(int(split_range[0])-1) else: page_nums_to_crop.update( set(range(int(split_range[0])-1, int(split_range[1])))) except ValueError: print( "\nError in pdfCropMargins: The page range specified on the command", "\nline contains a non-integer value or otherwise cannot be parsed.", file=sys.stderr) ex.cleanup_and_exit(1) page_nums_to_crop = page_nums_to_crop & all_page_nums # intersect chosen with actual else: page_nums_to_crop = all_page_nums # Print out the pages to crop in verbose mode. if args.verbose and args.pages: print("\nThese pages of the document will be cropped:", end="") p_num_list = sorted(list(page_nums_to_crop)) num_pages_to_crop = len(p_num_list) for i in range(num_pages_to_crop): if i % 10 == 0 and i != num_pages_to_crop - 1: print("\n ", end="") print("%5d" % (p_num_list[i]+1), " ", end="") print() elif args.verbose: print("\nAll the pages of the document will be cropped.") ## ## Get a list with the full-page boxes for each page: (left,bottom,right,top) ## This function also sets the MediaBox and CropBox of the pages to the ## chosen full-page size as a side-effect, saving the old boxes. ## full_page_box_list, rotation_list = get_full_page_box_list_assigning_media_and_crop( input_doc) tmp_full_page_box_list, tmp_rotation_list = get_full_page_box_list_assigning_media_and_crop( tmp_input_doc, quiet=True) ## ## Define a PdfFileWriter object and copy input_doc info over to it. ## output_doc, tmp_output_doc, already_cropped_by_this_program = setup_output_document( input_doc, tmp_input_doc, metadata_info) ## ## Write out the PDF document again, with the CropBox and MediaBox reset. ## This temp version is only used for calculating the bounding boxes of ## pages. Note we are writing from tmp_output_doc (due to an apparent bug ## discussed above). After this tmp_input_doc and tmp_output_doc are no longer ## needed. ## if not args.restore: doc_with_crop_and_media_boxes_name = ex.get_temporary_filename(".pdf") doc_with_crop_and_media_boxes_object = open( doc_with_crop_and_media_boxes_name, "wb") if args.verbose: print("\nWriting out the PDF with the CropBox and MediaBox redefined.") try: tmp_output_doc.write(doc_with_crop_and_media_boxes_object) except (KeyboardInterrupt, EOFError): raise except: # PyPDF2 can raise various exceptions. print("\nError in pdfCropMargins: The pyPdf program failed in trying to" "\nwrite out a PDF file of the document. The document may be" "\ncorrupted. If you have Ghostscript, try using the '--gsFix'" "\noption (assuming you are not already using it).", file=sys.stderr) ex.cleanup_and_exit(1) doc_with_crop_and_media_boxes_object.close() ## ## Calculate the bounding_box_list containing tight page bounds for each page. 
## if not args.restore: bounding_box_list = get_bounding_box_list(doc_with_crop_and_media_boxes_name, input_doc, full_page_box_list, page_nums_to_crop, args, PdfFileWriter) if args.verbose: print("\nThe bounding boxes are:") for pNum, b in enumerate(bounding_box_list): print("\t", pNum+1, "\t", b) ## ## Calculate the crop_list based on the fullpage boxes and the bounding boxes. ## if not args.restore: crop_list = calculate_crop_list(full_page_box_list, bounding_box_list, rotation_list, page_nums_to_crop) else: crop_list = None # Restore, not needed in this case. ## ## Apply the calculated crops to the pages of the PdfFileReader input_doc. ## These pages are copied to the PdfFileWriter output_doc. ## apply_crop_list(crop_list, input_doc, page_nums_to_crop, already_cropped_by_this_program) ## ## Write the final PDF out to a file. ## if args.verbose: print("\nWriting the cropped PDF file.") # TODO: Try and except on the open, since it might fail for permissions. output_doc_stream = open(output_doc_fname, "wb") try: output_doc.write(output_doc_stream) except (KeyboardInterrupt, EOFError): raise except: # PyPDF2 can raise various exceptions. try: # We know the write succeeded on tmp_output_doc or we wouldn't be here. # Malformed document catalog info can cause write failures, so get # a new output_doc without that data and try the write again. print("\nWrite failure, trying one more time...", file=sys.stderr) output_doc_stream.close() output_doc_stream = open(output_doc_fname, "wb") output_doc, tmp_output_doc, already_cropped = setup_output_document( input_doc, tmp_input_doc, metadata_info, copy_document_catalog=False) output_doc.write(output_doc_stream) print("\nWarning: Document catalog data caused a write failure. A retry" "\nwithout that data succeeded. No document catalog information was" "\ncopied to the cropped output file. Try fixing the PDF file. If" "\nyou have ghostscript installed, run pdfCropMargins with the '--gsFix'" "\noption. You can also try blacklisting some of the document catalog" "\nitems using the '--dcb' option.", file=sys.stderr) except (KeyboardInterrupt, EOFError): raise except: # Give up... PyPDF2 can raise many errors for many reasons. print("\nError in pdfCropMargins: The pyPdf program failed in trying to" "\nwrite out a PDF file of the document. The document may be" "\ncorrupted. If you have Ghostscript, try using the '--gsFix'" "\noption (assuming you are not already using it).", file=sys.stderr) ex.cleanup_and_exit(1) output_doc_stream.close() # We're finished with this open file; close it and let temp dir removal delete it. fixed_input_doc_file_object.close() ## ## Now handle the options which apply after the file is written. ## def do_preview(output_doc_fname): viewer = args.preview if args.verbose: print("\nPreviewing the output document with viewer:\n ", viewer) ex.show_preview(viewer, output_doc_fname) return # Handle the '--queryModifyOriginal' option. if args.queryModifyOriginal: if args.preview: print("\nRunning the preview viewer on the file, will query whether or not" "\nto modify the original file after the viewer is launched in the" "\nbackground...\n") do_preview(output_doc_fname) # Give preview time to start; it may write startup garbage to the terminal... query_wait_time = 2 # seconds time.sleep(query_wait_time) print() while True: query_string = "\nModify the original file to the cropped file " \ "(saving the original)? 
[yn] " if ex.python_version[0] == "2": query_result = raw_input(query_string).decode("utf-8") else: query_result = input(query_string) if query_result in ["y", "Y"]: args.modifyOriginal = True print("\nModifying the original file.") break elif query_result in ["n", "N"]: print("\nNot modifying the original file. The cropped file is saved" " as:\n {0}".format(output_doc_fname)) args.modifyOriginal = False break else: print("Response must be in the set {y,Y,n,N}, none recognized.") continue # Handle the '--modifyOriginal' option. if args.modifyOriginal: generated_uncropped_filename = generate_default_filename( input_doc_fname, is_cropped_file=False) # Remove any existing file with the name generated_uncropped_filename unless a # relevant noclobber option is set or it isn't a file. if os.path.exists(generated_uncropped_filename): if (os.path.isfile(generated_uncropped_filename) and not args.noclobberOriginal and not args.noclobber): if args.verbose: print("\nRemoving the file\n ", generated_uncropped_filename) try: os.remove(generated_uncropped_filename) except OSError: print("Removing the file {} failed. Maybe a permission error?" "\nFiles are as if option '--modifyOriginal' were not set." .format(generated_uncropped_filename)) args.modifyOriginal = False # Failed. else: print("\nA noclobber option is set or else not a file; refusing to" " overwrite:\n ", generated_uncropped_filename, "\nFiles are as if option '--modifyOriginal' were not set.", file=sys.stderr) args.modifyOriginal = False # Failed. # Move the original file to the name for uncropped files. Silently do nothing # if the file exists (should have been removed above). if not os.path.exists(generated_uncropped_filename): if args.verbose: print("\nDoing a file move:\n ", input_doc_fname, "\nis moving to:\n ", generated_uncropped_filename) shutil.move(input_doc_fname, generated_uncropped_filename) # Move the cropped file to the original file's name. Silently do nothing if # the file exists (should have been moved above). if not os.path.exists(input_doc_fname): if args.verbose: print("\nDoing a file move:\n ", output_doc_fname, "\nis moving to:\n ", input_doc_fname) shutil.move(output_doc_fname, input_doc_fname) # Handle any previewing which still needs to be done. if args.preview and not args.queryModifyOriginal: # already previewed in query mod if args.modifyOriginal: # already swapped to original filename in this case do_preview(input_doc_fname) else: # the usual case, preview the output filename do_preview(output_doc_fname) if args.verbose: print("\nFinished this run of pdfCropMargins.\n")
[ "def", "main_crop", "(", ")", ":", "##", "## Process some of the command-line arguments.", "##", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nProcessing the PDF with pdfCropMargins (version\"", ",", "__version__", "+", "\")...\"", ")", "print", "(", "\"System type:\"", ",", "ex", ".", "system_os", ")", "if", "len", "(", "args", ".", "pdf_input_doc", ")", ">", "1", ":", "print", "(", "\"\\nError in pdfCropMargins: Only one input PDF document is allowed.\"", "\"\\nFound more than one on the command line:\"", ",", "file", "=", "sys", ".", "stderr", ")", "for", "f", "in", "args", ".", "pdf_input_doc", ":", "print", "(", "\" \"", ",", "f", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "input_doc_fname", "=", "ex", ".", "glob_if_windows_os", "(", "args", ".", "pdf_input_doc", "[", "0", "]", ",", "exact_num_args", "=", "1", ")", "[", "0", "]", "if", "not", "input_doc_fname", ".", "endswith", "(", "(", "\".pdf\"", ",", "\".PDF\"", ")", ")", ":", "print", "(", "\"\\nWarning in pdfCropMargins: The file extension is neither '.pdf'\"", "\"\\nnor '.PDF'; continuing anyway.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe input document's filename is:\\n \"", ",", "input_doc_fname", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "input_doc_fname", ")", ":", "print", "(", "\"\\nError in pdfCropMargins: The specified input file\\n \"", "+", "input_doc_fname", "+", "\"\\nis not a file or does not exist.\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "if", "not", "args", ".", "outfile", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nUsing the default-generated output filename.\"", ")", "output_doc_fname", "=", "generate_default_filename", "(", "input_doc_fname", ")", "else", ":", "output_doc_fname", "=", "ex", ".", "glob_if_windows_os", "(", "args", ".", "outfile", "[", "0", "]", ",", "exact_num_args", "=", "1", ")", "[", "0", "]", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe output document's filename will be:\\n \"", ",", "output_doc_fname", ")", "if", "os", ".", "path", ".", "lexists", "(", "output_doc_fname", ")", "and", "args", ".", "noclobber", ":", "print", "(", "\"\\nOption '--noclobber' is set, refusing to overwrite an existing\"", "\"\\nfile with filename:\\n \"", ",", "output_doc_fname", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "if", "os", ".", "path", ".", "lexists", "(", "output_doc_fname", ")", "and", "ex", ".", "samefile", "(", "input_doc_fname", ",", "output_doc_fname", ")", ":", "print", "(", "\"\\nError in pdfCropMargins: The input file is the same as\"", "\"\\nthe output file.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "if", "args", ".", "gsBbox", "and", "len", "(", "args", ".", "fullPageBox", ")", ">", "1", ":", "print", "(", "\"\\nWarning: only one --fullPageBox value can be used with the -gs option.\"", ",", "\"\\nIgnoring all but the first one.\"", ",", "file", "=", "sys", ".", "stderr", ")", "args", ".", "fullPageBox", "=", "[", "args", ".", "fullPageBox", "[", "0", "]", "]", "elif", "args", ".", "gsBbox", "and", "not", "args", ".", "fullPageBox", ":", "args", ".", "fullPageBox", "=", "[", "\"c\"", "]", "# gs default", "elif", "not", "args", ".", "fullPageBox", ":", "args", ".", "fullPageBox", "=", "[", "\"m\"", ",", "\"c\"", "]", "# usual default", "if", "args", ".", "verbose", 
":", "print", "(", "\"\\nFor the full page size, using values from the PDF box\"", "\"\\nspecified by the intersection of these boxes:\"", ",", "args", ".", "fullPageBox", ")", "if", "args", ".", "absolutePreCrop", ":", "args", ".", "absolutePreCrop", "*=", "4", "# expand to 4 offsets", "# See if all four offsets are explicitly set and use those if so.", "if", "args", ".", "absolutePreCrop4", ":", "args", ".", "absolutePreCrop", "=", "args", ".", "absolutePreCrop4", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe absolute pre-crops to be applied to each margin, in units of bp,\"", "\" are:\\n \"", ",", "args", ".", "absolutePreCrop", ")", "if", "args", ".", "percentRetain", ":", "args", ".", "percentRetain", "*=", "4", "# expand to 4 percents", "# See if all four percents are explicitly set and use those if so.", "if", "args", ".", "percentRetain4", ":", "args", ".", "percentRetain", "=", "args", ".", "percentRetain4", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe percentages of margins to retain are:\\n \"", ",", "args", ".", "percentRetain", ")", "if", "args", ".", "absoluteOffset", ":", "args", ".", "absoluteOffset", "*=", "4", "# expand to 4 offsets", "# See if all four offsets are explicitly set and use those if so.", "if", "args", ".", "absoluteOffset4", ":", "args", ".", "absoluteOffset", "=", "args", ".", "absoluteOffset4", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe absolute offsets to be applied to each margin, in units of bp,\"", "\" are:\\n \"", ",", "args", ".", "absoluteOffset", ")", "# Parse the page ratio into a float if user chose that option.", "if", "args", ".", "setPageRatios", ":", "ratio", "=", "args", ".", "setPageRatios", "[", "0", "]", ".", "split", "(", "\":\"", ")", "if", "len", "(", "ratio", ")", ">", "2", ":", "print", "(", "\"\\nError in pdfCropMargins: Bad format in aspect ratio command line\"", "\" argument.\\nToo many colons.\"", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "try", ":", "if", "len", "(", "ratio", ")", "==", "2", ":", "args", ".", "setPageRatios", "[", "0", "]", "=", "float", "(", "ratio", "[", "0", "]", ")", "/", "float", "(", "ratio", "[", "1", "]", ")", "else", ":", "args", ".", "setPageRatios", "[", "0", "]", "=", "float", "(", "ratio", "[", "0", "]", ")", "except", "ValueError", ":", "print", "(", "\"\\nError in pdfCropMargins: Bad format in aspect ratio command line\"", "\" argument.\\nCannot convert to a float.\"", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "# Set executable paths to non-default locations if set.", "if", "args", ".", "pdftoppmPath", ":", "ex", ".", "set_pdftoppm_executable_to_string", "(", "args", ".", "pdftoppmPath", ")", "if", "args", ".", "ghostscriptPath", ":", "ex", ".", "set_gs_executable_to_string", "(", "args", ".", "ghostscriptPath", ")", "# If the option settings require pdftoppm, make sure we have a running", "# version. If '--gsBbox' isn't chosen then assume that PDF pages are to be", "# explicitly rendered. 
In that case we either need pdftoppm or gs to do the", "# rendering.", "gs_render_fallback_set", "=", "False", "# Set True if we switch to gs option as a fallback.", "if", "not", "args", ".", "gsBbox", "and", "not", "args", ".", "gsRender", ":", "found_pdftoppm", "=", "ex", ".", "init_and_test_pdftoppm_executable", "(", "prefer_local", "=", "args", ".", "pdftoppmLocal", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nFound pdftoppm program at:\"", ",", "found_pdftoppm", ")", "if", "not", "found_pdftoppm", ":", "args", ".", "gsRender", "=", "True", "gs_render_fallback_set", "=", "True", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nNo pdftoppm executable found; using Ghostscript for rendering.\"", ")", "# If any options require Ghostscript, make sure it it installed.", "if", "args", ".", "gsBbox", "or", "args", ".", "gsFix", "or", "args", ".", "gsRender", ":", "found_gs", "=", "ex", ".", "init_and_test_gs_executable", "(", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nFound Ghostscript program at:\"", ",", "found_gs", ")", "if", "args", ".", "gsBbox", "and", "not", "found_gs", ":", "print", "(", "\"\\nError in pdfCropMargins: The '--gsBbox' option was specified but\"", "\"\\nthe Ghostscript executable could not be located. Is it\"", "\"\\ninstalled and in the PATH for command execution?\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "if", "args", ".", "gsFix", "and", "not", "found_gs", ":", "print", "(", "\"\\nError in pdfCropMargins: The '--gsFix' option was specified but\"", "\"\\nthe Ghostscript executable could not be located. Is it\"", "\"\\ninstalled and in the PATH for command execution?\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "if", "args", ".", "gsRender", "and", "not", "found_gs", ":", "if", "gs_render_fallback_set", ":", "print", "(", "\"\\nError in pdfCropMargins: Neither Ghostscript nor pdftoppm\"", "\"\\nwas found in the PATH for command execution. At least one is\"", "\"\\nrequired.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "else", ":", "print", "(", "\"\\nError in pdfCropMargins: The '--gsRender' option was specified but\"", "\"\\nthe Ghostscript executable could not be located. Is it\"", "\"\\ninstalled and in the PATH for command execution?\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "# Give a warning message if incompatible option combinations have been selected.", "if", "args", ".", "gsBbox", "and", "args", ".", "threshold", ":", "print", "(", "\"\\nWarning in pdfCropMargins: The '--threshold' option is ignored\"", "\"\\nwhen the '--gsBbox' option is also selected.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "if", "args", ".", "gsBbox", "and", "args", ".", "numBlurs", ":", "print", "(", "\"\\nWarning in pdfCropMargins: The '--numBlurs' option is ignored\"", "\"\\nwhen the '--gsBbox' option is also selected.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "if", "args", ".", "gsBbox", "and", "args", ".", "numSmooths", ":", "print", "(", "\"\\nWarning in pdfCropMargins: The '--numSmooths' option is ignored\"", "\"\\nwhen the '--gsBbox' option is also selected.\\n\"", ",", "file", "=", "sys", ".", "stderr", ")", "##", "## Open the input document in a PdfFileReader object. Due to an apparent bug", "## in pyPdf we open two PdfFileReader objects for the file. The time required should", "## still be small relative to finding the bounding boxes of pages. 
The bug is", "## that writing a PdfFileWriter tends to hang on certain files if 1) pages from", "## the same PdfFileReader are shared between two PdfFileWriter objects, or 2)", "## the PdfFileWriter is written, the pages are modified, and there is an attempt", "## to write the same PdfFileWriter to a different file.", "##", "if", "args", ".", "gsFix", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nAttempting to fix the PDF input file before reading it...\"", ")", "fixed_input_doc_fname", "=", "ex", ".", "fix_pdf_with_ghostscript_to_tmp_file", "(", "input_doc_fname", ")", "else", ":", "fixed_input_doc_fname", "=", "input_doc_fname", "# Open the input file object.", "# TODO: Need try except since might fail for permissions.", "fixed_input_doc_file_object", "=", "open", "(", "fixed_input_doc_fname", ",", "\"rb\"", ")", "try", ":", "input_doc", "=", "PdfFileReader", "(", "fixed_input_doc_file_object", ")", "tmp_input_doc", "=", "PdfFileReader", "(", "fixed_input_doc_file_object", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "raise", "except", ":", "# Can raise various exceptions, just catch the rest here.", "print", "(", "\"\\nError in pdfCropMargins: The pyPdf module failed in an attempt\"", "\"\\nto read the input file. Is the file a PDF file? If so then it\"", "\"\\nmay be corrupted. If you have Ghostscript, try the '--gsFix'\"", "\"\\noption (assuming you are not using it already). That option can\"", "\"\\nalso convert some PostScript files to a readable format.\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "##", "## See if the document needs to be decrypted.", "##", "if", "args", ".", "password", ":", "try", ":", "input_doc", ".", "decrypt", "(", "args", ".", "password", ")", "tmp_input_doc", ".", "decrypt", "(", "args", ".", "password", ")", "except", "KeyError", ":", "print", "(", "\"\\nDecrypting with the password from the '--password' option\"", "\"\\nfailed.\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "else", ":", "# try decrypting with an empty password", "try", ":", "input_doc", ".", "decrypt", "(", "\"\"", ")", "tmp_input_doc", ".", "decrypt", "(", "\"\"", ")", "except", "KeyError", ":", "pass", "# document apparently wasn't encrypted with an empty password", "##", "## Print out some data and metadata in verbose mode.", "##", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe input document has %s pages.\"", "%", "input_doc", ".", "getNumPages", "(", ")", ")", "try", ":", "# This is needed because the call sometimes just raises an error.", "metadata_info", "=", "input_doc", ".", "getDocumentInfo", "(", ")", "except", ":", "print", "(", "\"\\nWarning: Document metadata could not be read.\"", ",", "file", "=", "sys", ".", "stderr", ")", "metadata_info", "=", "None", "if", "args", ".", "verbose", "and", "not", "metadata_info", ":", "print", "(", "\"\\nNo readable metadata in the document.\"", ")", "elif", "args", ".", "verbose", ":", "try", ":", "print", "(", "\"\\nThe document's metadata, if set:\\n\"", ")", "print", "(", "\" The Author attribute set in the input document is:\\n %s\"", "%", "(", "metadata_info", ".", "author", ")", ")", "print", "(", "\" The Creator attribute set in the input document is:\\n %s\"", "%", "(", "metadata_info", ".", "creator", ")", ")", "print", "(", "\" The Producer attribute set in the input document is:\\n %s\"", "%", "(", "metadata_info", ".", "producer", ")", ")", "print", "(", "\" The Subject attribute 
set in the input document is:\\n %s\"", "%", "(", "metadata_info", ".", "subject", ")", ")", "print", "(", "\" The Title attribute set in the input document is:\\n %s\"", "%", "(", "metadata_info", ".", "title", ")", ")", "# Some metadata cannot be decoded or encoded, at least on Windows. Could", "# print from a function instead to write all the lines which can be written.", "except", "(", "UnicodeDecodeError", ",", "UnicodeEncodeError", ")", ":", "print", "(", "\"\\nWarning: Could not write all the document's metadata to the screen.\"", "\"\\nGot a UnicodeEncodeError or a UnicodeDecodeError.\"", ")", "##", "## Now compute the set containing the pyPdf page number of all the pages", "## which the user has selected for cropping from the command line. Most", "## calculations are still carried-out for all the pages in the document.", "## (There are a few optimizations for expensive operations like finding", "## bounding boxes; the rest is negligible). This keeps the correspondence", "## between page numbers and the positions of boxes in the box lists. The", "## function apply_crop_list then just ignores the cropping information for any", "## pages which were not selected.", "##", "all_page_nums", "=", "set", "(", "range", "(", "0", ",", "input_doc", ".", "getNumPages", "(", ")", ")", ")", "page_nums_to_crop", "=", "set", "(", ")", "# Note that this set holds page num MINUS ONE, start at 0.", "if", "args", ".", "pages", ":", "# Parse any page range specifier argument.", "for", "page_num_or_range", "in", "args", ".", "pages", ".", "split", "(", "\",\"", ")", ":", "split_range", "=", "page_num_or_range", ".", "split", "(", "\"-\"", ")", "try", ":", "if", "len", "(", "split_range", ")", "==", "1", ":", "# Note pyPdf page nums start at 0, not 1 like usual PDF pages,", "# subtract 1.", "page_nums_to_crop", ".", "add", "(", "int", "(", "split_range", "[", "0", "]", ")", "-", "1", ")", "else", ":", "page_nums_to_crop", ".", "update", "(", "set", "(", "range", "(", "int", "(", "split_range", "[", "0", "]", ")", "-", "1", ",", "int", "(", "split_range", "[", "1", "]", ")", ")", ")", ")", "except", "ValueError", ":", "print", "(", "\"\\nError in pdfCropMargins: The page range specified on the command\"", ",", "\"\\nline contains a non-integer value or otherwise cannot be parsed.\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "page_nums_to_crop", "=", "page_nums_to_crop", "&", "all_page_nums", "# intersect chosen with actual", "else", ":", "page_nums_to_crop", "=", "all_page_nums", "# Print out the pages to crop in verbose mode.", "if", "args", ".", "verbose", "and", "args", ".", "pages", ":", "print", "(", "\"\\nThese pages of the document will be cropped:\"", ",", "end", "=", "\"\"", ")", "p_num_list", "=", "sorted", "(", "list", "(", "page_nums_to_crop", ")", ")", "num_pages_to_crop", "=", "len", "(", "p_num_list", ")", "for", "i", "in", "range", "(", "num_pages_to_crop", ")", ":", "if", "i", "%", "10", "==", "0", "and", "i", "!=", "num_pages_to_crop", "-", "1", ":", "print", "(", "\"\\n \"", ",", "end", "=", "\"\"", ")", "print", "(", "\"%5d\"", "%", "(", "p_num_list", "[", "i", "]", "+", "1", ")", ",", "\" \"", ",", "end", "=", "\"\"", ")", "print", "(", ")", "elif", "args", ".", "verbose", ":", "print", "(", "\"\\nAll the pages of the document will be cropped.\"", ")", "##", "## Get a list with the full-page boxes for each page: (left,bottom,right,top)", "## This function also sets the MediaBox and CropBox of the pages to the", "## chosen full-page 
size as a side-effect, saving the old boxes.", "##", "full_page_box_list", ",", "rotation_list", "=", "get_full_page_box_list_assigning_media_and_crop", "(", "input_doc", ")", "tmp_full_page_box_list", ",", "tmp_rotation_list", "=", "get_full_page_box_list_assigning_media_and_crop", "(", "tmp_input_doc", ",", "quiet", "=", "True", ")", "##", "## Define a PdfFileWriter object and copy input_doc info over to it.", "##", "output_doc", ",", "tmp_output_doc", ",", "already_cropped_by_this_program", "=", "setup_output_document", "(", "input_doc", ",", "tmp_input_doc", ",", "metadata_info", ")", "##", "## Write out the PDF document again, with the CropBox and MediaBox reset.", "## This temp version is only used for calculating the bounding boxes of", "## pages. Note we are writing from tmp_output_doc (due to an apparent bug", "## discussed above). After this tmp_input_doc and tmp_output_doc are no longer", "## needed.", "##", "if", "not", "args", ".", "restore", ":", "doc_with_crop_and_media_boxes_name", "=", "ex", ".", "get_temporary_filename", "(", "\".pdf\"", ")", "doc_with_crop_and_media_boxes_object", "=", "open", "(", "doc_with_crop_and_media_boxes_name", ",", "\"wb\"", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nWriting out the PDF with the CropBox and MediaBox redefined.\"", ")", "try", ":", "tmp_output_doc", ".", "write", "(", "doc_with_crop_and_media_boxes_object", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "raise", "except", ":", "# PyPDF2 can raise various exceptions.", "print", "(", "\"\\nError in pdfCropMargins: The pyPdf program failed in trying to\"", "\"\\nwrite out a PDF file of the document. The document may be\"", "\"\\ncorrupted. If you have Ghostscript, try using the '--gsFix'\"", "\"\\noption (assuming you are not already using it).\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "doc_with_crop_and_media_boxes_object", ".", "close", "(", ")", "##", "## Calculate the bounding_box_list containing tight page bounds for each page.", "##", "if", "not", "args", ".", "restore", ":", "bounding_box_list", "=", "get_bounding_box_list", "(", "doc_with_crop_and_media_boxes_name", ",", "input_doc", ",", "full_page_box_list", ",", "page_nums_to_crop", ",", "args", ",", "PdfFileWriter", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nThe bounding boxes are:\"", ")", "for", "pNum", ",", "b", "in", "enumerate", "(", "bounding_box_list", ")", ":", "print", "(", "\"\\t\"", ",", "pNum", "+", "1", ",", "\"\\t\"", ",", "b", ")", "##", "## Calculate the crop_list based on the fullpage boxes and the bounding boxes.", "##", "if", "not", "args", ".", "restore", ":", "crop_list", "=", "calculate_crop_list", "(", "full_page_box_list", ",", "bounding_box_list", ",", "rotation_list", ",", "page_nums_to_crop", ")", "else", ":", "crop_list", "=", "None", "# Restore, not needed in this case.", "##", "## Apply the calculated crops to the pages of the PdfFileReader input_doc.", "## These pages are copied to the PdfFileWriter output_doc.", "##", "apply_crop_list", "(", "crop_list", ",", "input_doc", ",", "page_nums_to_crop", ",", "already_cropped_by_this_program", ")", "##", "## Write the final PDF out to a file.", "##", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nWriting the cropped PDF file.\"", ")", "# TODO: Try and except on the open, since it might fail for permissions.", "output_doc_stream", "=", "open", "(", "output_doc_fname", ",", "\"wb\"", ")", "try", ":", "output_doc", ".", "write", "(", 
"output_doc_stream", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "raise", "except", ":", "# PyPDF2 can raise various exceptions.", "try", ":", "# We know the write succeeded on tmp_output_doc or we wouldn't be here.", "# Malformed document catalog info can cause write failures, so get", "# a new output_doc without that data and try the write again.", "print", "(", "\"\\nWrite failure, trying one more time...\"", ",", "file", "=", "sys", ".", "stderr", ")", "output_doc_stream", ".", "close", "(", ")", "output_doc_stream", "=", "open", "(", "output_doc_fname", ",", "\"wb\"", ")", "output_doc", ",", "tmp_output_doc", ",", "already_cropped", "=", "setup_output_document", "(", "input_doc", ",", "tmp_input_doc", ",", "metadata_info", ",", "copy_document_catalog", "=", "False", ")", "output_doc", ".", "write", "(", "output_doc_stream", ")", "print", "(", "\"\\nWarning: Document catalog data caused a write failure. A retry\"", "\"\\nwithout that data succeeded. No document catalog information was\"", "\"\\ncopied to the cropped output file. Try fixing the PDF file. If\"", "\"\\nyou have ghostscript installed, run pdfCropMargins with the '--gsFix'\"", "\"\\noption. You can also try blacklisting some of the document catalog\"", "\"\\nitems using the '--dcb' option.\"", ",", "file", "=", "sys", ".", "stderr", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "raise", "except", ":", "# Give up... PyPDF2 can raise many errors for many reasons.", "print", "(", "\"\\nError in pdfCropMargins: The pyPdf program failed in trying to\"", "\"\\nwrite out a PDF file of the document. The document may be\"", "\"\\ncorrupted. If you have Ghostscript, try using the '--gsFix'\"", "\"\\noption (assuming you are not already using it).\"", ",", "file", "=", "sys", ".", "stderr", ")", "ex", ".", "cleanup_and_exit", "(", "1", ")", "output_doc_stream", ".", "close", "(", ")", "# We're finished with this open file; close it and let temp dir removal delete it.", "fixed_input_doc_file_object", ".", "close", "(", ")", "##", "## Now handle the options which apply after the file is written.", "##", "def", "do_preview", "(", "output_doc_fname", ")", ":", "viewer", "=", "args", ".", "preview", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nPreviewing the output document with viewer:\\n \"", ",", "viewer", ")", "ex", ".", "show_preview", "(", "viewer", ",", "output_doc_fname", ")", "return", "# Handle the '--queryModifyOriginal' option.", "if", "args", ".", "queryModifyOriginal", ":", "if", "args", ".", "preview", ":", "print", "(", "\"\\nRunning the preview viewer on the file, will query whether or not\"", "\"\\nto modify the original file after the viewer is launched in the\"", "\"\\nbackground...\\n\"", ")", "do_preview", "(", "output_doc_fname", ")", "# Give preview time to start; it may write startup garbage to the terminal...", "query_wait_time", "=", "2", "# seconds", "time", ".", "sleep", "(", "query_wait_time", ")", "print", "(", ")", "while", "True", ":", "query_string", "=", "\"\\nModify the original file to the cropped file \"", "\"(saving the original)? 
[yn] \"", "if", "ex", ".", "python_version", "[", "0", "]", "==", "\"2\"", ":", "query_result", "=", "raw_input", "(", "query_string", ")", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "query_result", "=", "input", "(", "query_string", ")", "if", "query_result", "in", "[", "\"y\"", ",", "\"Y\"", "]", ":", "args", ".", "modifyOriginal", "=", "True", "print", "(", "\"\\nModifying the original file.\"", ")", "break", "elif", "query_result", "in", "[", "\"n\"", ",", "\"N\"", "]", ":", "print", "(", "\"\\nNot modifying the original file. The cropped file is saved\"", "\" as:\\n {0}\"", ".", "format", "(", "output_doc_fname", ")", ")", "args", ".", "modifyOriginal", "=", "False", "break", "else", ":", "print", "(", "\"Response must be in the set {y,Y,n,N}, none recognized.\"", ")", "continue", "# Handle the '--modifyOriginal' option.", "if", "args", ".", "modifyOriginal", ":", "generated_uncropped_filename", "=", "generate_default_filename", "(", "input_doc_fname", ",", "is_cropped_file", "=", "False", ")", "# Remove any existing file with the name generated_uncropped_filename unless a", "# relevant noclobber option is set or it isn't a file.", "if", "os", ".", "path", ".", "exists", "(", "generated_uncropped_filename", ")", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "generated_uncropped_filename", ")", "and", "not", "args", ".", "noclobberOriginal", "and", "not", "args", ".", "noclobber", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nRemoving the file\\n \"", ",", "generated_uncropped_filename", ")", "try", ":", "os", ".", "remove", "(", "generated_uncropped_filename", ")", "except", "OSError", ":", "print", "(", "\"Removing the file {} failed. Maybe a permission error?\"", "\"\\nFiles are as if option '--modifyOriginal' were not set.\"", ".", "format", "(", "generated_uncropped_filename", ")", ")", "args", ".", "modifyOriginal", "=", "False", "# Failed.", "else", ":", "print", "(", "\"\\nA noclobber option is set or else not a file; refusing to\"", "\" overwrite:\\n \"", ",", "generated_uncropped_filename", ",", "\"\\nFiles are as if option '--modifyOriginal' were not set.\"", ",", "file", "=", "sys", ".", "stderr", ")", "args", ".", "modifyOriginal", "=", "False", "# Failed.", "# Move the original file to the name for uncropped files. Silently do nothing", "# if the file exists (should have been removed above).", "if", "not", "os", ".", "path", ".", "exists", "(", "generated_uncropped_filename", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nDoing a file move:\\n \"", ",", "input_doc_fname", ",", "\"\\nis moving to:\\n \"", ",", "generated_uncropped_filename", ")", "shutil", ".", "move", "(", "input_doc_fname", ",", "generated_uncropped_filename", ")", "# Move the cropped file to the original file's name. 
Silently do nothing if", "# the file exists (should have been moved above).", "if", "not", "os", ".", "path", ".", "exists", "(", "input_doc_fname", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nDoing a file move:\\n \"", ",", "output_doc_fname", ",", "\"\\nis moving to:\\n \"", ",", "input_doc_fname", ")", "shutil", ".", "move", "(", "output_doc_fname", ",", "input_doc_fname", ")", "# Handle any previewing which still needs to be done.", "if", "args", ".", "preview", "and", "not", "args", ".", "queryModifyOriginal", ":", "# already previewed in query mod", "if", "args", ".", "modifyOriginal", ":", "# already swapped to original filename in this case", "do_preview", "(", "input_doc_fname", ")", "else", ":", "# the usual case, preview the output filename", "do_preview", "(", "output_doc_fname", ")", "if", "args", ".", "verbose", ":", "print", "(", "\"\\nFinished this run of pdfCropMargins.\\n\"", ")" ]
47.45328
0.008001
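The page-selection parsing buried inside main_crop above is a self-contained algorithm worth isolating. The sketch below re-implements just that step under a hypothetical name (parse_page_spec); the real CLI additionally reports errors and exits on bad input.

def parse_page_spec(spec, num_pages):
    """Convert a spec like '1,3-5,7' into a set of zero-based page numbers."""
    selected = set()
    for part in spec.split(","):
        bounds = part.split("-")
        if len(bounds) == 1:
            selected.add(int(bounds[0]) - 1)  # PDF pages are 1-based
        else:
            selected.update(range(int(bounds[0]) - 1, int(bounds[1])))
    return selected & set(range(num_pages))  # intersect with actual pages

print(sorted(parse_page_spec("1,3-5,7", 6)))  # [0, 2, 3, 4]; page 7 is out of range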
def _lookup_model(cls, kind, default_model=None): """Get the model class for the kind. Args: kind: A string representing the name of the kind to lookup. default_model: The model class to use if the kind can't be found. Returns: The model class for the requested kind. Raises: KindError: The kind was not found and no default_model was provided. """ modelclass = cls._kind_map.get(kind, default_model) if modelclass is None: raise KindError( "No model class found for kind '%s'. Did you forget to import it?" % kind) return modelclass
[ "def", "_lookup_model", "(", "cls", ",", "kind", ",", "default_model", "=", "None", ")", ":", "modelclass", "=", "cls", ".", "_kind_map", ".", "get", "(", "kind", ",", "default_model", ")", "if", "modelclass", "is", "None", ":", "raise", "KindError", "(", "\"No model class found for kind '%s'. Did you forget to import it?\"", "%", "kind", ")", "return", "modelclass" ]
33.333333
0.003241
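The function above is the classic registry-lookup pattern: a class-level dict maps kind names to classes, with an optional fallback. A minimal self-contained sketch follows; registration is done by hand here, whereas the real datastore wires the kind map up automatically during class creation.

class KindError(Exception):
    pass

class Model(object):
    _kind_map = {}  # kind name -> model class

    @classmethod
    def _lookup_model(cls, kind, default_model=None):
        modelclass = cls._kind_map.get(kind, default_model)
        if modelclass is None:
            raise KindError("No model class found for kind '%s'." % kind)
        return modelclass

class Greeting(Model):
    pass

Model._kind_map['Greeting'] = Greeting  # normally done during class creation
assert Model._lookup_model('Greeting') is Greeting
assert Model._lookup_model('Missing', default_model=Model) is Model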
def redirect_to_express(self): """ First step of ExpressCheckout. Redirect the request to PayPal using the data returned from setExpressCheckout. """ wpp = PayPalWPP(self.request) try: nvp_obj = wpp.setExpressCheckout(self.item) except PayPalFailure: warn_untested() self.context['errors'] = self.errors['paypal'] return self.render_payment_form() else: return HttpResponseRedirect(express_endpoint_for_token(nvp_obj.token))
[ "def", "redirect_to_express", "(", "self", ")", ":", "wpp", "=", "PayPalWPP", "(", "self", ".", "request", ")", "try", ":", "nvp_obj", "=", "wpp", ".", "setExpressCheckout", "(", "self", ".", "item", ")", "except", "PayPalFailure", ":", "warn_untested", "(", ")", "self", ".", "context", "[", "'errors'", "]", "=", "self", ".", "errors", "[", "'paypal'", "]", "return", "self", ".", "render_payment_form", "(", ")", "else", ":", "return", "HttpResponseRedirect", "(", "express_endpoint_for_token", "(", "nvp_obj", ".", "token", ")", ")" ]
38.214286
0.005474
def ib_group_member_add(self, group_id, userids):
    ''' add the given list of user IDs to an information barrier (IB) group '''
    req_hook = 'pod/v1/admin/group/' + group_id + '/membership/add'
    req_args = {'usersListId': userids}
    req_args = json.dumps(req_args)
    status_code, response = self.__rest__.POST_query(req_hook, req_args)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
[ "def", "ib_group_member_add", "(", "self", ",", "group_id", ",", "userids", ")", ":", "req_hook", "=", "'pod/v1/admin/group/'", "+", "group_id", "+", "'/membership/add'", "req_args", "=", "{", "'usersListId'", ":", "userids", "}", "req_args", "=", "json", ".", "dumps", "(", "req_args", ")", "status_code", ",", "response", "=", "self", ".", "__rest__", ".", "POST_query", "(", "req_hook", ",", "req_args", ")", "self", ".", "logger", ".", "debug", "(", "'%s: %s'", "%", "(", "status_code", ",", "response", ")", ")", "return", "status_code", ",", "response" ]
51.25
0.004796
def active_power(self):
    """
    Take the sum of all instantaneous active power values and
    return the total in kW.

    Returns
    -------
    float
    """
    inst = self.load_instantaneous()
    values = [float(i['value']) for i in inst if i['key'].endswith('ActivePower')]
    return sum(values) / 1000
[ "def", "active_power", "(", "self", ")", ":", "inst", "=", "self", ".", "load_instantaneous", "(", ")", "values", "=", "[", "float", "(", "i", "[", "'value'", "]", ")", "for", "i", "in", "inst", "if", "i", "[", "'key'", "]", ".", "endswith", "(", "'ActivePower'", ")", "]", "return", "sum", "(", "values", ")", "/", "1000" ]
28
0.008646
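A stubbed run of the summation above. The payload shape is inferred from the key filter, and the values are assumed to be reported in watts (hence the division by 1000 for kW).

inst = [
    {'key': 'L1ActivePower', 'value': '230.0'},
    {'key': 'L2ActivePower', 'value': '120.5'},
    {'key': 'L1Voltage', 'value': '231.0'},  # ignored: key does not end in ActivePower
]
values = [float(i['value']) for i in inst if i['key'].endswith('ActivePower')]
print(sum(values) / 1000)  # 0.3505 (kW)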
def delete_translation(self, language_code, related_name=None): """ Delete a translation from a model. :param language_code: The language to remove. :param related_name: If given, only the model matching that related_name is removed. """ if language_code is None: raise ValueError(get_null_language_error()) if related_name is None: metas = self._parler_meta else: metas = [self._parler_meta[related_name]] num_deleted = 0 for meta in metas: try: translation = self._get_translated_model(language_code, meta=meta) except meta.model.DoesNotExist: continue # By using the regular model delete, the cache is properly cleared # (via _delete_cached_translation) and signals are emitted. translation.delete() num_deleted += 1 # Clear other local caches try: del self._translations_cache[meta.model][language_code] except KeyError: pass try: del self._prefetched_objects_cache[meta.rel_name] except (AttributeError, KeyError): pass if not num_deleted: raise ValueError("Translation does not exist: {0}".format(language_code)) return num_deleted
[ "def", "delete_translation", "(", "self", ",", "language_code", ",", "related_name", "=", "None", ")", ":", "if", "language_code", "is", "None", ":", "raise", "ValueError", "(", "get_null_language_error", "(", ")", ")", "if", "related_name", "is", "None", ":", "metas", "=", "self", ".", "_parler_meta", "else", ":", "metas", "=", "[", "self", ".", "_parler_meta", "[", "related_name", "]", "]", "num_deleted", "=", "0", "for", "meta", "in", "metas", ":", "try", ":", "translation", "=", "self", ".", "_get_translated_model", "(", "language_code", ",", "meta", "=", "meta", ")", "except", "meta", ".", "model", ".", "DoesNotExist", ":", "continue", "# By using the regular model delete, the cache is properly cleared", "# (via _delete_cached_translation) and signals are emitted.", "translation", ".", "delete", "(", ")", "num_deleted", "+=", "1", "# Clear other local caches", "try", ":", "del", "self", ".", "_translations_cache", "[", "meta", ".", "model", "]", "[", "language_code", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "self", ".", "_prefetched_objects_cache", "[", "meta", ".", "rel_name", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "pass", "if", "not", "num_deleted", ":", "raise", "ValueError", "(", "\"Translation does not exist: {0}\"", ".", "format", "(", "language_code", ")", ")", "return", "num_deleted" ]
33.560976
0.003531
def imwrite(img, file_path, params=None, auto_mkdir=True): """Write image to file Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not. """ if auto_mkdir: dir_name = osp.abspath(osp.dirname(file_path)) mkdir_or_exist(dir_name) return cv2.imwrite(file_path, img, params)
[ "def", "imwrite", "(", "img", ",", "file_path", ",", "params", "=", "None", ",", "auto_mkdir", "=", "True", ")", ":", "if", "auto_mkdir", ":", "dir_name", "=", "osp", ".", "abspath", "(", "osp", ".", "dirname", "(", "file_path", ")", ")", "mkdir_or_exist", "(", "dir_name", ")", "return", "cv2", ".", "imwrite", "(", "file_path", ",", "img", ",", "params", ")" ]
34.294118
0.001669
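Typical use of the helper above, assuming it and its module's imports (cv2, os.path as osp) are in scope; a 16x16 black image is written to a path whose parent directories do not exist yet.

import numpy as np

img = np.zeros((16, 16, 3), dtype=np.uint8)          # a black BGR image
ok = imwrite(img, '/tmp/demo/nested/dir/black.png')  # parent dirs auto-created
print(ok)  # True on success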
def _partial_search_validator(self, sub, sup, anagram=False,
                              subsequence=False, supersequence=False):
    """
    It's responsible for validating the partial results of `search`
    method. If it returns True, the search would return its result.
    Else, search method would discard what it found and look for
    others.
    First, checks to see if all elements of `sub` are in `sup` with at
    least the same frequency and then checks to see if every element
    of `sub` appears in `sup` with the same order (index-wise).
    If advanced control structures are specified, the containment
    condition won't be checked.
    The code for index checking is from [1]_.

    Parameters
    ----------
    sub : list
    sup : list
    anagram : bool, optional
        Default is `False`
    subsequence : bool, optional
        Default is `False`
    supersequence : bool, optional
        Default is `False`

    Returns
    -------
    bool

    References
    ----------
    .. [1] https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist
    """
    def get_all_in(one, another):
        for element in one:
            if element in another:
                yield element

    def containment_check(sub, sup):
        return (set(Counter(sub).keys()).issubset(
            set(Counter(sup).keys())))

    def containment_freq_check(sub, sup):
        return (all([Counter(sub)[element] <= Counter(sup)[element]
                     for element in Counter(sub)]))

    def extra_freq_check(sub, sup, list_of_tups):
        # Would be used for matching anagrams, subsequences etc.
        return (len(list_of_tups) > 0 and
                all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
                     for tup in list_of_tups]))

    # Regarding containment checking while having extra conditions,
    # there's no good way to map each anagram or subseuqnece etc. that was
    # found to the query word, without making it more complicated than
    # it already is, because a query word can be anagram/subsequence etc.
    # to multiple words of the timestamps yet finding the one with the
    # right index would be the problem.
    # Therefore we just approximate the solution by just counting
    # the elements.
    if len(sub) > len(sup):
        return False

    for pred, func in set([(anagram, self._is_anagram_of),
                           (subsequence, self._is_subsequence_of),
                           (supersequence, self._is_supersequence_of)]):
        if pred:
            pred_seive = [(sub_key, sup_key)
                          for sub_key in set(Counter(sub).keys())
                          for sup_key in set(Counter(sup).keys())
                          if func(sub_key, sup_key)]
            if not extra_freq_check(sub, sup, pred_seive):
                return False

    if (
            not any([anagram, subsequence, supersequence]) and
            (not containment_check(sub, sup) or
             not containment_freq_check(sub, sup))
    ):
        return False

    for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
        if x1 != x2:
            return False

    return True
[ "def", "_partial_search_validator", "(", "self", ",", "sub", ",", "sup", ",", "anagram", "=", "False", ",", "subsequence", "=", "False", ",", "supersequence", "=", "False", ")", ":", "def", "get_all_in", "(", "one", ",", "another", ")", ":", "for", "element", "in", "one", ":", "if", "element", "in", "another", ":", "yield", "element", "def", "containment_check", "(", "sub", ",", "sup", ")", ":", "return", "(", "set", "(", "Counter", "(", "sub", ")", ".", "keys", "(", ")", ")", ".", "issubset", "(", "set", "(", "Counter", "(", "sup", ")", ".", "keys", "(", ")", ")", ")", ")", "def", "containment_freq_check", "(", "sub", ",", "sup", ")", ":", "return", "(", "all", "(", "[", "Counter", "(", "sub", ")", "[", "element", "]", "<=", "Counter", "(", "sup", ")", "[", "element", "]", "for", "element", "in", "Counter", "(", "sub", ")", "]", ")", ")", "def", "extra_freq_check", "(", "sub", ",", "sup", ",", "list_of_tups", ")", ":", "# Would be used for matching anagrams, subsequences etc.", "return", "(", "len", "(", "list_of_tups", ")", ">", "0", "and", "all", "(", "[", "Counter", "(", "sub", ")", "[", "tup", "[", "0", "]", "]", "<=", "Counter", "(", "sup", ")", "[", "tup", "[", "1", "]", "]", "for", "tup", "in", "list_of_tups", "]", ")", ")", "# Regarding containment checking while having extra conditions,", "# there's no good way to map each anagram or subseuqnece etc. that was", "# found to the query word, without making it more complicated than", "# it already is, because a query word can be anagram/subsequence etc.", "# to multiple words of the timestamps yet finding the one with the", "# right index would be the problem.", "# Therefore we just approximate the solution by just counting", "# the elements.", "if", "len", "(", "sub", ")", ">", "len", "(", "sup", ")", ":", "return", "False", "for", "pred", ",", "func", "in", "set", "(", "[", "(", "anagram", ",", "self", ".", "_is_anagram_of", ")", ",", "(", "subsequence", ",", "self", ".", "_is_subsequence_of", ")", ",", "(", "supersequence", ",", "self", ".", "_is_supersequence_of", ")", "]", ")", ":", "if", "pred", ":", "pred_seive", "=", "[", "(", "sub_key", ",", "sup_key", ")", "for", "sub_key", "in", "set", "(", "Counter", "(", "sub", ")", ".", "keys", "(", ")", ")", "for", "sup_key", "in", "set", "(", "Counter", "(", "sup", ")", ".", "keys", "(", ")", ")", "if", "func", "(", "sub_key", ",", "sup_key", ")", "]", "if", "not", "extra_freq_check", "(", "sub", ",", "sup", ",", "pred_seive", ")", ":", "return", "False", "if", "(", "not", "any", "(", "[", "anagram", ",", "subsequence", ",", "supersequence", "]", ")", "and", "(", "not", "containment_check", "(", "sub", ",", "sup", ")", "or", "not", "containment_freq_check", "(", "sub", ",", "sup", ")", ")", ")", ":", "return", "False", "for", "x1", ",", "x2", "in", "zip", "(", "get_all_in", "(", "sup", ",", "sub", ")", ",", "get_all_in", "(", "sub", ",", "sup", ")", ")", ":", "if", "x1", "!=", "x2", ":", "return", "False", "return", "True" ]
39.08046
0.001147
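The final loop above is the order-preservation trick from the cited Stack Overflow answer: filter each list down to the elements shared with the other, then compare pairwise. Stripped of the frequency checks it looks like this.

def get_all_in(one, another):
    for element in one:
        if element in another:
            yield element

def same_relative_order(sub, sup):
    return all(x1 == x2 for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)))

print(same_relative_order([2, 4], [1, 2, 3, 4]))  # True: 2 precedes 4 in both
print(same_relative_order([4, 2], [1, 2, 3, 4]))  # False: order differs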
def devnull():
    """Temporarily redirect stdout and stderr to /dev/null."""
    original_stderr = original_stdout = null = None
    try:
        original_stderr = os.dup(sys.stderr.fileno())
        original_stdout = os.dup(sys.stdout.fileno())
        null = open(os.devnull, 'w')
        os.dup2(null.fileno(), sys.stderr.fileno())
        os.dup2(null.fileno(), sys.stdout.fileno())
        yield
    finally:
        if original_stderr is not None:
            os.dup2(original_stderr, sys.stderr.fileno())
        if original_stdout is not None:
            os.dup2(original_stdout, sys.stdout.fileno())
        if null is not None:
            null.close()
[ "def", "devnull", "(", ")", ":", "try", ":", "original_stderr", "=", "os", ".", "dup", "(", "sys", ".", "stderr", ".", "fileno", "(", ")", ")", "original_stdout", "=", "os", ".", "dup", "(", "sys", ".", "stdout", ".", "fileno", "(", ")", ")", "null", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "os", ".", "dup2", "(", "null", ".", "fileno", "(", ")", ",", "sys", ".", "stderr", ".", "fileno", "(", ")", ")", "os", ".", "dup2", "(", "null", ".", "fileno", "(", ")", ",", "sys", ".", "stdout", ".", "fileno", "(", ")", ")", "yield", "finally", ":", "if", "original_stderr", "is", "not", "None", ":", "os", ".", "dup2", "(", "original_stderr", ",", "sys", ".", "stderr", ".", "fileno", "(", ")", ")", "if", "original_stdout", "is", "not", "None", ":", "os", ".", "dup2", "(", "original_stdout", ",", "sys", ".", "stdout", ".", "fileno", "(", ")", ")", "if", "null", "is", "not", "None", ":", "null", ".", "close", "(", ")" ]
33.166667
0.001629
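The generator above only behaves as a context manager once wrapped with contextlib.contextmanager; the decorator is not shown in the source (and the os/sys imports of its module are assumed), so this demo wraps it explicitly.

import contextlib

quiet = contextlib.contextmanager(devnull)

with quiet():
    print("swallowed by /dev/null")
print("visible again")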
def get_prep_value(self, value): """ Return the integer value to be stored from the hex string """ if value is None or value == "": return None if isinstance(value, six.string_types): value = _hex_string_to_unsigned_integer(value) if _using_signed_storage(): value = _unsigned_to_signed_integer(value) return value
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "or", "value", "==", "\"\"", ":", "return", "None", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "value", "=", "_hex_string_to_unsigned_integer", "(", "value", ")", "if", "_using_signed_storage", "(", ")", ":", "value", "=", "_unsigned_to_signed_integer", "(", "value", ")", "return", "value" ]
36.111111
0.03003
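The private helpers called above are not shown; the stand-ins below are plausible sketches only (the real implementations may differ), illustrating the hex-string-to-signed-storage round trip for a 64-bit column.

def _hex_string_to_unsigned_integer(value):
    return int(value, 16)

def _unsigned_to_signed_integer(value, bits=64):
    # Reinterpret an unsigned value as two's-complement signed.
    return value - (1 << bits) if value >= (1 << (bits - 1)) else value

print(_hex_string_to_unsigned_integer("ff"))            # 255
print(_unsigned_to_signed_integer(0xFFFFFFFFFFFFFFFF))  # -1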
def _percent(data, part, total): """ Calculate a percentage. """ try: return round(100 * float(data[part]) / float(data[total]), 1) except ZeroDivisionError: return 0
[ "def", "_percent", "(", "data", ",", "part", ",", "total", ")", ":", "try", ":", "return", "round", "(", "100", "*", "float", "(", "data", "[", "part", "]", ")", "/", "float", "(", "data", "[", "total", "]", ")", ",", "1", ")", "except", "ZeroDivisionError", ":", "return", "0" ]
24.375
0.00495
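A quick check of the helper above, including the zero-total guard.

data = {'used': 3, 'total': 8}
print(_percent(data, 'used', 'total'))                     # 37.5
print(_percent({'used': 1, 'total': 0}, 'used', 'total'))  # 0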
def ResultCollectionForFID(cls, flow_id):
    """Returns the ResultCollection for the flow with a given flow_id.

    Args:
      flow_id: The id of the flow, an RDFURN of the form aff4:/flows/F:123456.

    Returns:
      The collection containing the results for the flow identified by the id.
    """
    # TODO: Disallow/remove URNs after migration.
    if not isinstance(flow_id, rdfvalue.RDFURN):
      flow_id = rdfvalue.RDFURN(flow_id)

    return sequential_collection.GeneralIndexedCollection(
        flow_id.Add(RESULTS_SUFFIX))
[ "def", "ResultCollectionForFID", "(", "cls", ",", "flow_id", ")", ":", "# TODO: Disallow/remove URNs after migration.", "if", "not", "isinstance", "(", "flow_id", ",", "rdfvalue", ".", "RDFURN", ")", ":", "flow_id", "=", "rdfvalue", ".", "RDFURN", "(", "flow_id", ")", "return", "sequential_collection", ".", "GeneralIndexedCollection", "(", "flow_id", ".", "Add", "(", "RESULTS_SUFFIX", ")", ")" ]
35
0.003711
def _homogenize_linesep(line): """Enforce line separators to be the right one depending on platform.""" token = str(uuid.uuid4()) line = line.replace(os.linesep, token).replace("\n", "").replace("\r", "") return line.replace(token, os.linesep)
[ "def", "_homogenize_linesep", "(", "line", ")", ":", "token", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "line", "=", "line", ".", "replace", "(", "os", ".", "linesep", ",", "token", ")", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\r\"", ",", "\"\"", ")", "return", "line", ".", "replace", "(", "token", ",", "os", ".", "linesep", ")" ]
51
0.003861
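The throwaway UUID token shields genuine os.linesep sequences while stray bare '\n'/'\r' characters are dropped. On a POSIX system (os.linesep == '\n'):

line = "first\r\nsecond\rthird"
print(repr(_homogenize_linesep(line)))  # 'first\nsecondthird'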
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    With the given list of UUIDs, retrieves all RAW data between the two given timestamps

    Arguments:
    [uuids]: list of UUIDs
    [start, end]: start and end time references
    [archiver]: if specified, this is the archiver to use. Else, it will run on the first
                archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
    return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{})
[ "def", "data_uuids", "(", "self", ",", "uuids", ",", "start", ",", "end", ",", "archiver", "=", "\"\"", ",", "timeout", "=", "DEFAULT_TIMEOUT", ")", ":", "if", "not", "isinstance", "(", "uuids", ",", "list", ")", ":", "uuids", "=", "[", "uuids", "]", "where", "=", "\" or \"", ".", "join", "(", "[", "'uuid = \"{0}\"'", ".", "format", "(", "uuid", ")", "for", "uuid", "in", "uuids", "]", ")", "return", "self", ".", "query", "(", "\"select data in ({0}, {1}) where {2}\"", ".", "format", "(", "start", ",", "end", ",", "where", ")", ",", "archiver", ",", "timeout", ")", ".", "get", "(", "'timeseries'", ",", "{", "}", ")" ]
52.533333
0.007481
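The query is plain string assembly, so it can be inspected without touching an archiver; the UUIDs and time bounds below are made up.

uuids = ["7e6ce9a3-0000-0000-0000-000000000001",
         "7e6ce9a3-0000-0000-0000-000000000002"]  # made-up UUIDs
where = " or ".join('uuid = "{0}"'.format(u) for u in uuids)
query = "select data in ({0}, {1}) where {2}".format('"2018-01-01"', '"2018-01-02"', where)
print(query)
# select data in ("2018-01-01", "2018-01-02") where uuid = "7e6ce9a3-..." or uuid = "7e6ce9a3-..."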
def default(self, obj): """Serialize obj into JSON.""" # pylint: disable=method-hidden, protected-access, arguments-differ if isinstance(obj, Sensor): return { 'sensor_id': obj.sensor_id, 'children': obj.children, 'type': obj.type, 'sketch_name': obj.sketch_name, 'sketch_version': obj.sketch_version, 'battery_level': obj.battery_level, 'protocol_version': obj.protocol_version, 'heartbeat': obj.heartbeat, } if isinstance(obj, ChildSensor): return { 'id': obj.id, 'type': obj.type, 'description': obj.description, 'values': obj.values, } return json.JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "# pylint: disable=method-hidden, protected-access, arguments-differ", "if", "isinstance", "(", "obj", ",", "Sensor", ")", ":", "return", "{", "'sensor_id'", ":", "obj", ".", "sensor_id", ",", "'children'", ":", "obj", ".", "children", ",", "'type'", ":", "obj", ".", "type", ",", "'sketch_name'", ":", "obj", ".", "sketch_name", ",", "'sketch_version'", ":", "obj", ".", "sketch_version", ",", "'battery_level'", ":", "obj", ".", "battery_level", ",", "'protocol_version'", ":", "obj", ".", "protocol_version", ",", "'heartbeat'", ":", "obj", ".", "heartbeat", ",", "}", "if", "isinstance", "(", "obj", ",", "ChildSensor", ")", ":", "return", "{", "'id'", ":", "obj", ".", "id", ",", "'type'", ":", "obj", ".", "type", ",", "'description'", ":", "obj", ".", "description", ",", "'values'", ":", "obj", ".", "values", ",", "}", "return", "json", ".", "JSONEncoder", ".", "default", "(", "self", ",", "obj", ")" ]
38.227273
0.00232
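Custom encoders like the one above are activated through the cls argument of json.dumps; a toy class stands in for Sensor here.

import json

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Point):
            return {'x': obj.x, 'y': obj.y}
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'origin': Point(0, 1)}, cls=PointEncoder))
# {"origin": {"x": 0, "y": 1}}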
def wait(self, timeout=None): """Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed). """ us = -1 if timeout is None else int(timeout * 1000000) return super(Reader, self).wait(us)
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "us", "=", "-", "1", "if", "timeout", "is", "None", "else", "int", "(", "timeout", "*", "1000000", ")", "return", "super", "(", "Reader", ",", "self", ")", ".", "wait", "(", "us", ")" ]
38.75
0.006303
def get_password(prompt='Password: ', confirm=False):
  """
  <Purpose>
    Return the password entered by the user.  If 'confirm' is True, the user is
    asked to enter the previously entered password once again.  If they match,
    the password is returned to the caller.

  <Arguments>
    prompt:
      The text of the password prompt that is displayed to the user.

    confirm:
      Boolean indicating whether the user should be prompted for the password
      a second time.  The two entered passwords must match, otherwise the
      user is again prompted for a password.

  <Exceptions>
    None.

  <Side Effects>
    None.

  <Returns>
    The password entered by the user.
  """

  # Are the arguments the expected type?
  # If not, raise 'securesystemslib.exceptions.FormatError'.
  securesystemslib.formats.TEXT_SCHEMA.check_match(prompt)
  securesystemslib.formats.BOOLEAN_SCHEMA.check_match(confirm)

  while True:
    # getpass() prompts the user for a password without echoing
    # the user input.
    password = getpass.getpass(prompt, sys.stderr)

    if not confirm:
      return password
    password2 = getpass.getpass('Confirm: ', sys.stderr)

    if password == password2:
      return password

    else:
      print('Mismatch; try again.')
[ "def", "get_password", "(", "prompt", "=", "'Password: '", ",", "confirm", "=", "False", ")", ":", "# Are the arguments the expected type?", "# If not, raise 'securesystemslib.exceptions.FormatError'.", "securesystemslib", ".", "formats", ".", "TEXT_SCHEMA", ".", "check_match", "(", "prompt", ")", "securesystemslib", ".", "formats", ".", "BOOLEAN_SCHEMA", ".", "check_match", "(", "confirm", ")", "while", "True", ":", "# getpass() prompts the user for a password without echoing", "# the user input.", "password", "=", "getpass", ".", "getpass", "(", "prompt", ",", "sys", ".", "stderr", ")", "if", "not", "confirm", ":", "return", "password", "password2", "=", "getpass", ".", "getpass", "(", "'Confirm: '", ",", "sys", ".", "stderr", ")", "if", "password", "==", "password2", ":", "return", "password", "else", ":", "print", "(", "'Mismatch; try again.'", ")" ]
27.2
0.007886
def add_general_optgroup(parser): """ option group for general-use features of all javatool CLIs """ g = parser.add_argument_group("General Options") g.add_argument("-q", "--quiet", dest="silent", action="store_true", default=False) g.add_argument("-v", "--verbose", nargs=0, action=_opt_cb_verbose) g.add_argument("-o", "--output", dest="output", default=None) g.add_argument("-j", "--json", dest="json", action="store_true", default=False) g.add_argument("--show-ignored", action="store_true", default=False) g.add_argument("--show-unchanged", action="store_true", default=False) g.add_argument("--ignore", action=_opt_cb_ignore, help="comma-separated list of ignores")
[ "def", "add_general_optgroup", "(", "parser", ")", ":", "g", "=", "parser", ".", "add_argument_group", "(", "\"General Options\"", ")", "g", ".", "add_argument", "(", "\"-q\"", ",", "\"--quiet\"", ",", "dest", "=", "\"silent\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ")", "g", ".", "add_argument", "(", "\"-v\"", ",", "\"--verbose\"", ",", "nargs", "=", "0", ",", "action", "=", "_opt_cb_verbose", ")", "g", ".", "add_argument", "(", "\"-o\"", ",", "\"--output\"", ",", "dest", "=", "\"output\"", ",", "default", "=", "None", ")", "g", ".", "add_argument", "(", "\"-j\"", ",", "\"--json\"", ",", "dest", "=", "\"json\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ")", "g", ".", "add_argument", "(", "\"--show-ignored\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ")", "g", ".", "add_argument", "(", "\"--show-unchanged\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ")", "g", ".", "add_argument", "(", "\"--ignore\"", ",", "action", "=", "_opt_cb_ignore", ",", "help", "=", "\"comma-separated list of ignores\"", ")" ]
34.454545
0.001284
def add_error(self, property_name, message): """Add an error for the given property.""" if property_name not in self.errors: self.errors[property_name] = [] self.errors[property_name].append(message)
[ "def", "add_error", "(", "self", ",", "property_name", ",", "message", ")", ":", "if", "property_name", "not", "in", "self", ".", "errors", ":", "self", ".", "errors", "[", "property_name", "]", "=", "[", "]", "self", ".", "errors", "[", "property_name", "]", ".", "append", "(", "message", ")" ]
42.6
0.009217
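The errors attribute is a plain dict of message lists; a minimal host class shows the accumulation behaviour.

class Validator:
    def __init__(self):
        self.errors = {}

    def add_error(self, property_name, message):
        if property_name not in self.errors:
            self.errors[property_name] = []
        self.errors[property_name].append(message)

v = Validator()
v.add_error('email', 'is required')
v.add_error('email', 'must be valid')
print(v.errors)  # {'email': ['is required', 'must be valid']}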
def image(self, path_img):
    """ Open an image file and print it """
    im_open = Image.open(path_img)
    im = im_open.convert("RGB")

    # Convert the RGB image into a printable image
    pix_line, img_size = self._convert_image(im)
    self._print_image(pix_line, img_size)
[ "def", "image", "(", "self", ",", "path_img", ")", ":", "im_open", "=", "Image", ".", "open", "(", "path_img", ")", "im", "=", "im_open", ".", "convert", "(", "\"RGB\"", ")", "# Convert the RGB image in printable image", "pix_line", ",", "img_size", "=", "self", ".", "_convert_image", "(", "im", ")", "self", ".", "_print_image", "(", "pix_line", ",", "img_size", ")" ]
39.428571
0.010638
def _parse_openssh_output(lines, fingerprint_hash_type=None):
    '''
    Helper function which parses ``ssh-keygen -F`` and ``ssh-keyscan``
    output and yields a dict of key information for each entry, one by one.
    '''
    for line in lines:
        # We don't need any whitespace-only containing lines or arbitrary doubled newlines
        line = line.strip()
        if line == '':
            continue
        line += '\n'

        if line.startswith('#'):
            continue

        try:
            hostname, enc, key = line.split()
        except ValueError:  # incorrect format
            continue

        fingerprint = _fingerprint(key,
                                   fingerprint_hash_type=fingerprint_hash_type)
        if not fingerprint:
            continue

        yield {'hostname': hostname, 'key': key, 'enc': enc,
               'fingerprint': fingerprint}
[ "def", "_parse_openssh_output", "(", "lines", ",", "fingerprint_hash_type", "=", "None", ")", ":", "for", "line", "in", "lines", ":", "# We don't need any whitespace-only containing lines or arbitrary doubled newlines", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "==", "''", ":", "continue", "line", "+=", "'\\n'", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "try", ":", "hostname", ",", "enc", ",", "key", "=", "line", ".", "split", "(", ")", "except", "ValueError", ":", "# incorrect format", "continue", "fingerprint", "=", "_fingerprint", "(", "key", ",", "fingerprint_hash_type", "=", "fingerprint_hash_type", ")", "if", "not", "fingerprint", ":", "continue", "yield", "{", "'hostname'", ":", "hostname", ",", "'key'", ":", "key", ",", "'enc'", ":", "enc", ",", "'fingerprint'", ":", "fingerprint", "}" ]
35.375
0.002294
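An ssh-keyscan output line has the form 'hostname key-type base64-key'. The core parsing (minus Salt's internal _fingerprint step) on a made-up key:

sample = "example.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5FakeKeyForDemoOnly\n"
for line in [sample, "# comment line\n", "   \n"]:
    line = line.strip()
    if line == '' or line.startswith('#'):
        continue  # skip blanks and comments, as the helper above does
    hostname, enc, key = line.split()
    print({'hostname': hostname, 'enc': enc, 'key': key})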
def _is_known_unsigned_by_dtype(dt): """Helper returning True if dtype is known to be unsigned.""" return { tf.bool: True, tf.uint8: True, tf.uint16: True, }.get(dt.base_dtype, False)
[ "def", "_is_known_unsigned_by_dtype", "(", "dt", ")", ":", "return", "{", "tf", ".", "bool", ":", "True", ",", "tf", ".", "uint8", ":", "True", ",", "tf", ".", "uint16", ":", "True", ",", "}", ".", "get", "(", "dt", ".", "base_dtype", ",", "False", ")" ]
28.714286
0.014493
def chhome(name, home, **kwargs):
    '''
    Change the home directory of the user, pass True for persist to move files
    to the new home directory if the old home directory exists.

    Args:
        name (str): The name of the user whose home directory you wish to change

        home (str): The new location of the home directory

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' user.chhome foo \\\\fileserver\\home\\foo True
    '''
    if six.PY2:
        name = _to_unicode(name)
        home = _to_unicode(home)

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    persist = kwargs.pop('persist', False)
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)
    if persist:
        log.info('Ignoring unsupported \'persist\' argument to user.chhome')

    pre_info = info(name)
    if not pre_info:
        return False

    if home == pre_info['home']:
        return True

    if not update(name=name, home=home):
        return False

    post_info = info(name)
    if post_info['home'] != pre_info['home']:
        return post_info['home'] == home

    return False
[ "def", "chhome", "(", "name", ",", "home", ",", "*", "*", "kwargs", ")", ":", "if", "six", ".", "PY2", ":", "name", "=", "_to_unicode", "(", "name", ")", "home", "=", "_to_unicode", "(", "home", ")", "kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "kwargs", ")", "persist", "=", "kwargs", ".", "pop", "(", "'persist'", ",", "False", ")", "if", "kwargs", ":", "salt", ".", "utils", ".", "args", ".", "invalid_kwargs", "(", "kwargs", ")", "if", "persist", ":", "log", ".", "info", "(", "'Ignoring unsupported \\'persist\\' argument to user.chhome'", ")", "pre_info", "=", "info", "(", "name", ")", "if", "not", "pre_info", ":", "return", "False", "if", "home", "==", "pre_info", "[", "'home'", "]", ":", "return", "True", "if", "not", "update", "(", "name", "=", "name", ",", "home", "=", "home", ")", ":", "return", "False", "post_info", "=", "info", "(", "name", ")", "if", "post_info", "[", "'home'", "]", "!=", "pre_info", "[", "'home'", "]", ":", "return", "post_info", "[", "'home'", "]", "==", "home", "return", "False" ]
24.434783
0.001711
def delete_app(self, app_name): """ Delete the given app. Will fail intentionally if there are any service bindings. You must delete those first. """ if app_name not in self.space.get_apps(): logging.warning("App not found so... succeeded?") return True guid = self.get_app_guid(app_name) self.api.delete("/v2/apps/%s" % (guid))
[ "def", "delete_app", "(", "self", ",", "app_name", ")", ":", "if", "app_name", "not", "in", "self", ".", "space", ".", "get_apps", "(", ")", ":", "logging", ".", "warning", "(", "\"App not found so... succeeded?\"", ")", "return", "True", "guid", "=", "self", ".", "get_app_guid", "(", "app_name", ")", "self", ".", "api", ".", "delete", "(", "\"/v2/apps/%s\"", "%", "(", "guid", ")", ")" ]
31.307692
0.004773
def transform_grid_to_reference_frame(self, grid):
    """Transform a grid of (y,x) coordinates to the reference frame of the profile, including a translation to \
    its centre and a rotation to its orientation.

    Parameters
    ----------
    grid : ndarray
        The (y, x) coordinates in the original reference frame of the grid.
    """
    if self.__class__.__name__.startswith("Spherical"):
        return super().transform_grid_to_reference_frame(grid)
    shifted_coordinates = np.subtract(grid, self.centre)
    radius = np.sqrt(np.sum(shifted_coordinates ** 2.0, 1))
    theta_coordinate_to_profile = np.arctan2(shifted_coordinates[:, 0],
                                             shifted_coordinates[:, 1]) - self.phi_radians
    transformed = np.vstack(
        (radius * np.sin(theta_coordinate_to_profile),
         radius * np.cos(theta_coordinate_to_profile))).T
    return transformed.view(TransformedGrid)
[ "def", "transform_grid_to_reference_frame", "(", "self", ",", "grid", ")", ":", "if", "self", ".", "__class__", ".", "__name__", ".", "startswith", "(", "\"Spherical\"", ")", ":", "return", "super", "(", ")", ".", "transform_grid_to_reference_frame", "(", "grid", ")", "shifted_coordinates", "=", "np", ".", "subtract", "(", "grid", ",", "self", ".", "centre", ")", "radius", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "shifted_coordinates", "**", "2.0", ",", "1", ")", ")", "theta_coordinate_to_profile", "=", "np", ".", "arctan2", "(", "shifted_coordinates", "[", ":", ",", "0", "]", ",", "shifted_coordinates", "[", ":", ",", "1", "]", ")", "-", "self", ".", "phi_radians", "transformed", "=", "np", ".", "vstack", "(", "(", "radius", "*", "np", ".", "sin", "(", "theta_coordinate_to_profile", ")", ",", "radius", "*", "np", ".", "cos", "(", "theta_coordinate_to_profile", ")", ")", ")", ".", "T", "return", "transformed", ".", "view", "(", "TransformedGrid", ")" ]
53.888889
0.005066
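The transform above is a shift to the profile centre followed by a rotation by -phi; a pure-numpy sketch with illustrative values (a 90-degree rotation about centre (1, 0)):

import numpy as np

grid = np.array([[1.0, 1.0],
                 [2.0, 0.0]])  # (y, x) pairs
centre = np.array([1.0, 0.0])
phi = np.pi / 2.0              # profile orientation in radians

shifted = grid - centre
radius = np.sqrt((shifted ** 2).sum(axis=1))
theta = np.arctan2(shifted[:, 0], shifted[:, 1]) - phi
transformed = np.vstack((radius * np.sin(theta),
                         radius * np.cos(theta))).T
print(transformed.round(6))    # [[-1.  0.]  [ 0.  1.]]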
def http_sa_http_server_shutdown(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") http_sa = ET.SubElement(config, "http-sa", xmlns="urn:brocade.com:mgmt:brocade-http") http = ET.SubElement(http_sa, "http") server = ET.SubElement(http, "server") shutdown = ET.SubElement(server, "shutdown") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "http_sa_http_server_shutdown", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "http_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"http-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-http\"", ")", "http", "=", "ET", ".", "SubElement", "(", "http_sa", ",", "\"http\"", ")", "server", "=", "ET", ".", "SubElement", "(", "http", ",", "\"server\"", ")", "shutdown", "=", "ET", ".", "SubElement", "(", "server", ",", "\"shutdown\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
41
0.006508
def message_for_exception(exception: Exception, message: str) -> Sequence[Token]:
    """ Returns a tuple suitable for cli_ui.error()
    from the given exception.
    (Traceback will be part of the message, after the ``message`` argument)

    Useful when the exception occurs in another thread
    than the main one.

    """
    tb = sys.exc_info()[2]
    buffer = io.StringIO()
    traceback.print_tb(tb, file=buffer)
    # fmt: off
    return (
        red, message + "\n",
        exception.__class__.__name__,
        str(exception), "\n",
        reset, buffer.getvalue()
    )
[ "def", "message_for_exception", "(", "exception", ":", "Exception", ",", "message", ":", "str", ")", "->", "Sequence", "[", "Token", "]", ":", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "buffer", "=", "io", ".", "StringIO", "(", ")", "traceback", ".", "print_tb", "(", "tb", ",", "file", "=", "io", ")", "# type: ignore", "# fmt: off", "return", "(", "red", ",", "message", "+", "\"\\n\"", ",", "exception", ".", "__class__", ".", "__name__", ",", "str", "(", "exception", ")", ",", "\"\\n\"", ",", "reset", ",", "buffer", ".", "getvalue", "(", ")", ")" ]
29.15
0.003322
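A stdlib-only sketch of the same capture idiom (with the `file=buffer` fix applied), omitting the `red`/`reset` colour tokens that come from the surrounding cli_ui module:

```python
import io
import sys
import traceback

def format_exception(exception, message):
    # sys.exc_info() is only meaningful inside an `except` block
    tb = sys.exc_info()[2]
    buffer = io.StringIO()
    traceback.print_tb(tb, file=buffer)  # write the traceback into the buffer
    return "{}\n{}: {}\n{}".format(
        message, exception.__class__.__name__, exception, buffer.getvalue())

try:
    {}["missing"]
except KeyError as error:
    print(format_exception(error, "Worker thread failed"))
```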
def find_python(): """Search for Python automatically""" python = ( _state.get("pythonExecutable") or # Support for multiple executables. next(( exe for exe in os.getenv("PYBLISH_QML_PYTHON_EXECUTABLE", "").split(os.pathsep) if os.path.isfile(exe)), None ) or # Search PATH for executables. which("python") or which("python3") ) if not python or not os.path.isfile(python): raise ValueError("Could not locate Python executable.") return python
[ "def", "find_python", "(", ")", ":", "python", "=", "(", "_state", ".", "get", "(", "\"pythonExecutable\"", ")", "or", "# Support for multiple executables.", "next", "(", "(", "exe", "for", "exe", "in", "os", ".", "getenv", "(", "\"PYBLISH_QML_PYTHON_EXECUTABLE\"", ",", "\"\"", ")", ".", "split", "(", "os", ".", "pathsep", ")", "if", "os", ".", "path", ".", "isfile", "(", "exe", ")", ")", ",", "None", ")", "or", "# Search PATH for executables.", "which", "(", "\"python\"", ")", "or", "which", "(", "\"python3\"", ")", ")", "if", "not", "python", "or", "not", "os", ".", "path", ".", "isfile", "(", "python", ")", ":", "raise", "ValueError", "(", "\"Could not locate Python executable.\"", ")", "return", "python" ]
26.047619
0.001764
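A hedged stdlib rewrite of the same lookup order, with `shutil.which` standing in for the module's own `which` helper:

```python
import os
import shutil

def find_python():
    # an explicit list of candidates in the environment wins over PATH
    for exe in os.getenv("PYBLISH_QML_PYTHON_EXECUTABLE", "").split(os.pathsep):
        if os.path.isfile(exe):
            return exe
    python = shutil.which("python") or shutil.which("python3")
    if not python:
        raise ValueError("Could not locate Python executable.")
    return python

print(find_python())
```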
def mi_chain_rule(X, y):
    '''
    Decompose the information between all X and y according to the chain rule
    and return all the terms in the chain rule.

    Inputs:
    -------
        X:      iterable of iterables. You should be able to compute
                [mi(x, y) for x in X]
        y:      iterable of symbols

    output:
    -------
        ndarray: terms of the chain rule

    Implementation notes:
        I(X; y) = I(x0, x1, ..., xn; y)
                = I(x0; y) + I(x1; y | x0) + I(x2; y | x0, x1) + ... +
                  I(xn; y | x0, x1, ..., xn-1)
    '''
    # allocate ndarray output
    chain = np.zeros(len(X))

    # first term in the expansion is not a conditional information, but the information between the first x and y
    chain[0] = mi(X[0], y)

    for i in range(1, len(X)):
        chain[i] = cond_mi(X[i], y, X[:i])

    return chain
[ "def", "mi_chain_rule", "(", "X", ",", "y", ")", ":", "# allocate ndarray output", "chain", "=", "np", ".", "zeros", "(", "len", "(", "X", ")", ")", "# first term in the expansion is not a conditional information, but the information between the first x and y", "chain", "[", "0", "]", "=", "mi", "(", "X", "[", "0", "]", ",", "y", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "X", ")", ")", ":", "chain", "[", "i", "]", "=", "cond_mi", "(", "X", "[", "i", "]", ",", "y", ",", "X", "[", ":", "i", "]", ")", "return", "chain" ]
28.310345
0.005889
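`mi` and `cond_mi` are not shown in this record; below is a self-contained numerical check of the chain-rule identity, using plug-in entropy estimates over discrete symbols as an assumed stand-in for what those helpers compute:

```python
import numpy as np
from collections import Counter

def entropy(*columns):
    """Plug-in entropy (bits) of the joint distribution of the given columns."""
    counts = np.array(list(Counter(zip(*columns)).values()), dtype=float)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

def mi(x, y):
    return entropy(x) + entropy(y) - entropy(x, y)

def cond_mi(x, y, Z):
    # I(x; y | Z) = H(x, Z) + H(y, Z) - H(x, y, Z) - H(Z)
    Z = list(Z)
    return entropy(x, *Z) + entropy(y, *Z) - entropy(x, y, *Z) - entropy(*Z)

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(3, 5000))
y = X[0] ^ X[1]  # y depends on the first two coordinates only

chain = [mi(X[0], y)] + [cond_mi(X[i], y, X[:i]) for i in range(1, len(X))]
total = entropy(*X) + entropy(y) - entropy(*X, y)  # I(X; y)
print(np.allclose(sum(chain), total))  # True: the terms telescope exactly
```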
def crossspec(data, tbin, Df=None, units=False, pointProcess=False):
    """
    Calculate (smoothed) cross spectra of data.
    If `units`=True, cross spectra are averaged across units.
    Note that averaging is done on cross spectra rather than data.

    Cross spectra are normalized by the length T of the time series
    -> no scaling with T.
    If pointProcess=True, cross spectra are normalized by the length T of
    the time series.

    Parameters
    ----------
    data : numpy.ndarray,
        1st axis unit, 2nd axis time
    tbin : float,
        binsize in ms
    Df : float/None,
        window width of sliding rectangular filter (smoothing),
        None -> no smoothing
    units : bool,
        average cross spectrum
    pointProcess : bool,
        if set to True, cross spectrum is normalized to signal length T

    Returns
    -------
    freq : tuple
        numpy.ndarray of frequencies
    CRO : tuple
        if `units`=True: 1 dim numpy.ndarray; frequency series
        if `units`=False: 3 dim numpy.ndarray; 1st axis first unit,
        2nd axis second unit, 3rd axis frequency

    Examples
    --------
    >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df)
    Out[1]: (freq,CRO)
    >>> CRO.shape
    Out[2]: (2,2,len(analog_sig1))

    >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True)
    Out[1]: (freq,CRO)
    >>> CRO.shape
    Out[2]: (len(analog_sig1),)

    """
    N = len(data)
    if units is True:
        # smoothing and normalization take place in powerspec
        # and compound_powerspec
        freq, POW = powerspec(data, tbin, Df=Df, units=True)
        freq_com, CPOW = compound_powerspec(data, tbin, Df=Df)
        assert(len(freq) == len(freq_com))
        assert(np.min(freq) == np.min(freq_com))
        assert(np.max(freq) == np.max(freq_com))
        CRO = 1. / (1. * N * (N - 1.)) * (CPOW - 1. * N * POW)
        assert(len(freq) == len(CRO))
    else:
        freq, DATA = calculate_fft(data, tbin)
        T = tbin * len(freq)
        df = freq[1] - freq[0]
        if Df is not None:
            cut = int(Df / df)
            freq = freq[cut:]
        CRO = np.zeros((N, N, len(freq)), dtype=complex)
        for i in range(N):
            for j in range(i + 1):
                tempij = DATA[i] * DATA[j].conj()
                if Df is not None:
                    tempij = movav(tempij, Df, df)[cut:]
                CRO[i, j] = tempij
                CRO[j, i] = CRO[i, j].conj()
        assert(len(freq) == len(CRO[0, 0]))

    if pointProcess:
        CRO *= 1. / T * 1e3  # normalization

    return freq, CRO
[ "def", "crossspec", "(", "data", ",", "tbin", ",", "Df", "=", "None", ",", "units", "=", "False", ",", "pointProcess", "=", "False", ")", ":", "N", "=", "len", "(", "data", ")", "if", "units", "is", "True", ":", "# smoothing and normalization take place in powerspec", "# and compound_powerspec", "freq", ",", "POW", "=", "powerspec", "(", "data", ",", "tbin", ",", "Df", "=", "Df", ",", "units", "=", "True", ")", "freq_com", ",", "CPOW", "=", "compound_powerspec", "(", "data", ",", "tbin", ",", "Df", "=", "Df", ")", "assert", "(", "len", "(", "freq", ")", "==", "len", "(", "freq_com", ")", ")", "assert", "(", "np", ".", "min", "(", "freq", ")", "==", "np", ".", "min", "(", "freq_com", ")", ")", "assert", "(", "np", ".", "max", "(", "freq", ")", "==", "np", ".", "max", "(", "freq_com", ")", ")", "CRO", "=", "1.", "/", "(", "1.", "*", "N", "*", "(", "N", "-", "1.", ")", ")", "*", "(", "CPOW", "-", "1.", "*", "N", "*", "POW", ")", "assert", "(", "len", "(", "freq", ")", "==", "len", "(", "CRO", ")", ")", "else", ":", "freq", ",", "DATA", "=", "calculate_fft", "(", "data", ",", "tbin", ")", "T", "=", "tbin", "*", "len", "(", "freq", ")", "df", "=", "freq", "[", "1", "]", "-", "freq", "[", "0", "]", "if", "Df", "is", "not", "None", ":", "cut", "=", "int", "(", "Df", "/", "df", ")", "freq", "=", "freq", "[", "cut", ":", "]", "CRO", "=", "np", ".", "zeros", "(", "(", "N", ",", "N", ",", "len", "(", "freq", ")", ")", ",", "dtype", "=", "complex", ")", "for", "i", "in", "range", "(", "N", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ")", ":", "tempij", "=", "DATA", "[", "i", "]", "*", "DATA", "[", "j", "]", ".", "conj", "(", ")", "if", "Df", "is", "not", "None", ":", "tempij", "=", "movav", "(", "tempij", ",", "Df", ",", "df", ")", "[", "cut", ":", "]", "CRO", "[", "i", ",", "j", "]", "=", "tempij", "CRO", "[", "j", ",", "i", "]", "=", "CRO", "[", "i", ",", "j", "]", ".", "conj", "(", ")", "assert", "(", "len", "(", "freq", ")", "==", "len", "(", "CRO", "[", "0", ",", "0", "]", ")", ")", "if", "pointProcess", ":", "CRO", "*=", "1.", "/", "T", "*", "1e3", "# normalization", "return", "freq", ",", "CRO" ]
31.390244
0.00339
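For the `units=False` branch, here is a hedged direct illustration with NumPy's FFT in place of the module's `calculate_fft` helper, showing where shared power appears in the cross-spectrum matrix:

```python
import numpy as np

tbin = 1.0                      # bin size in ms
t = np.arange(0, 1000, tbin)    # 1 s of signal
data = np.array([np.sin(2 * np.pi * 0.05 * t),     # 50 Hz
                 np.cos(2 * np.pi * 0.05 * t)])    # 50 Hz, 90 deg apart

DATA = np.fft.rfft(data, axis=1)
freq = np.fft.rfftfreq(data.shape[1], d=tbin * 1e-3)  # in Hz

N = len(data)
CRO = np.empty((N, N, len(freq)), dtype=complex)
for i in range(N):
    for j in range(i + 1):
        CRO[i, j] = DATA[i] * DATA[j].conj()  # cross-spectrum of units i, j
        CRO[j, i] = CRO[i, j].conj()          # Hermitian symmetry

print(freq[np.argmax(np.abs(CRO[0, 1]))])  # ~50.0, where the pair co-varies
```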
def crypto_box_afternm(message, nonce, k): """ Encrypts and returns the message ``message`` using the shared key ``k`` and the nonce ``nonce``. :param message: bytes :param nonce: bytes :param k: bytes :rtype: bytes """ if len(nonce) != crypto_box_NONCEBYTES: raise exc.ValueError("Invalid nonce") if len(k) != crypto_box_BEFORENMBYTES: raise exc.ValueError("Invalid shared key") padded = b"\x00" * crypto_box_ZEROBYTES + message ciphertext = ffi.new("unsigned char[]", len(padded)) rc = lib.crypto_box_afternm(ciphertext, padded, len(padded), nonce, k) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(ciphertext, len(padded))[crypto_box_BOXZEROBYTES:]
[ "def", "crypto_box_afternm", "(", "message", ",", "nonce", ",", "k", ")", ":", "if", "len", "(", "nonce", ")", "!=", "crypto_box_NONCEBYTES", ":", "raise", "exc", ".", "ValueError", "(", "\"Invalid nonce\"", ")", "if", "len", "(", "k", ")", "!=", "crypto_box_BEFORENMBYTES", ":", "raise", "exc", ".", "ValueError", "(", "\"Invalid shared key\"", ")", "padded", "=", "b\"\\x00\"", "*", "crypto_box_ZEROBYTES", "+", "message", "ciphertext", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "len", "(", "padded", ")", ")", "rc", "=", "lib", ".", "crypto_box_afternm", "(", "ciphertext", ",", "padded", ",", "len", "(", "padded", ")", ",", "nonce", ",", "k", ")", "ensure", "(", "rc", "==", "0", ",", "'Unexpected library error'", ",", "raising", "=", "exc", ".", "RuntimeError", ")", "return", "ffi", ".", "buffer", "(", "ciphertext", ",", "len", "(", "padded", ")", ")", "[", "crypto_box_BOXZEROBYTES", ":", "]" ]
30.84
0.001258
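This function belongs to PyNaCl's low-level bindings; a hedged round trip through the public `nacl.bindings` names (assuming PyNaCl is installed):

```python
from nacl import bindings, utils

pk_a, sk_a = bindings.crypto_box_keypair()   # Alice
pk_b, sk_b = bindings.crypto_box_keypair()   # Bob

# precompute the shared key once, then encrypt cheaply with *_afternm
k = bindings.crypto_box_beforenm(pk_b, sk_a)
nonce = utils.random(bindings.crypto_box_NONCEBYTES)
ciphertext = bindings.crypto_box_afternm(b"hello", nonce, k)

# Bob derives the same shared key from the opposite key halves
k_b = bindings.crypto_box_beforenm(pk_a, sk_b)
print(bindings.crypto_box_open_afternm(ciphertext, nonce, k_b))  # b'hello'
```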
def methods_to_table(obj):
    r"""
    Generate a reST grid table listing the public methods defined on the
    object's class (beyond those of its immediate parent), along with the
    first line of each method's docstring.
    """
    parent = obj.__class__.__mro__[1]
    temp = inspect.getmembers(parent, predicate=inspect.isroutine)
    parent_funcs = [i[0] for i in temp if not i[0].startswith('_')]
    temp = inspect.getmembers(obj.__class__, predicate=inspect.isroutine)
    obj_funcs = [i[0] for i in temp if not i[0].startswith('_')]
    funcs = set(obj_funcs).difference(set(parent_funcs))
    row = '+' + '-'*22 + '+' + '-'*49 + '+'
    fmt = '{0:1s} {1:20s} {2:1s} {3:47s} {4:1s}'
    lines = []
    lines.append(row)
    lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
    lines.append(row.replace('-', '='))
    for i, item in enumerate(funcs):
        try:
            s = getattr(obj, item).__doc__.strip()
            end = s.find('\n')
            if end > 47:
                s = s[:44] + '...'
            lines.append(fmt.format('|', item, '|', s[:end], '|'))
            lines.append(row)
        except AttributeError:
            pass
    return '\n'.join(lines)
[ "def", "methods_to_table", "(", "obj", ")", ":", "parent", "=", "obj", ".", "__class__", ".", "__mro__", "[", "1", "]", "temp", "=", "inspect", ".", "getmembers", "(", "parent", ",", "predicate", "=", "inspect", ".", "isroutine", ")", "parent_funcs", "=", "[", "i", "[", "0", "]", "for", "i", "in", "temp", "if", "not", "i", "[", "0", "]", ".", "startswith", "(", "'_'", ")", "]", "temp", "=", "inspect", ".", "getmembers", "(", "obj", ".", "__class__", ",", "predicate", "=", "inspect", ".", "isroutine", ")", "obj_funcs", "=", "[", "i", "[", "0", "]", "for", "i", "in", "temp", "if", "not", "i", "[", "0", "]", ".", "startswith", "(", "'_'", ")", "]", "funcs", "=", "set", "(", "obj_funcs", ")", ".", "difference", "(", "set", "(", "parent_funcs", ")", ")", "row", "=", "'+'", "+", "'-'", "*", "22", "+", "'+'", "+", "'-'", "*", "49", "+", "'+'", "fmt", "=", "'{0:1s} {1:20s} {2:1s} {3:47s} {4:1s}'", "lines", "=", "[", "]", "lines", ".", "append", "(", "row", ")", "lines", ".", "append", "(", "fmt", ".", "format", "(", "'|'", ",", "'Method'", ",", "'|'", ",", "'Description'", ",", "'|'", ")", ")", "lines", ".", "append", "(", "row", ".", "replace", "(", "'-'", ",", "'='", ")", ")", "for", "i", ",", "item", "in", "enumerate", "(", "funcs", ")", ":", "try", ":", "s", "=", "getattr", "(", "obj", ",", "item", ")", ".", "__doc__", ".", "strip", "(", ")", "end", "=", "s", ".", "find", "(", "'\\n'", ")", "if", "end", ">", "47", ":", "s", "=", "s", "[", ":", "44", "]", "+", "'...'", "lines", ".", "append", "(", "fmt", ".", "format", "(", "'|'", ",", "item", ",", "'|'", ",", "s", "[", ":", "end", "]", ",", "'|'", ")", ")", "lines", ".", "append", "(", "row", ")", "except", "AttributeError", ":", "pass", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
35.392857
0.000982
def seconds_to_str_fromatter(_format):
    """
    Build a formatter for durations given in seconds.
    Accepted format directives: %i (milliseconds), %s (seconds),
    %m (minutes), %h (hours). The special formats "%S" and "%I"
    render the total duration in seconds and milliseconds respectively.
    """
    # check directives are correct
    if _format == "%S":
        def _fromatter(seconds):
            return "{:.2f}".format(seconds)

    elif _format == "%I":
        def _fromatter(seconds):
            return "{0}".format(int(seconds * 1000))

    else:
        _format = _format.replace("%h", "{hrs:02d}")
        _format = _format.replace("%m", "{mins:02d}")
        _format = _format.replace("%s", "{secs:02d}")
        _format = _format.replace("%i", "{millis:03d}")

        try:
            i = _format.index("%")
            raise TimeFormatError("Unknown time format directive '{0}'".format(_format[i:i+2]))
        except ValueError:
            pass

        def _fromatter(seconds):
            millis = int(seconds * 1000)
            hrs, millis = divmod(millis, 3600000)
            mins, millis = divmod(millis, 60000)
            secs, millis = divmod(millis, 1000)
            return _format.format(hrs=hrs, mins=mins, secs=secs, millis=millis)

    return _fromatter
[ "def", "seconds_to_str_fromatter", "(", "_format", ")", ":", "# check directives are correct ", "if", "_format", "==", "\"%S\"", ":", "def", "_fromatter", "(", "seconds", ")", ":", "return", "\"{:.2f}\"", ".", "format", "(", "seconds", ")", "elif", "_format", "==", "\"%I\"", ":", "def", "_fromatter", "(", "seconds", ")", ":", "return", "\"{0}\"", ".", "format", "(", "int", "(", "seconds", "*", "1000", ")", ")", "else", ":", "_format", "=", "_format", ".", "replace", "(", "\"%h\"", ",", "\"{hrs:02d}\"", ")", "_format", "=", "_format", ".", "replace", "(", "\"%m\"", ",", "\"{mins:02d}\"", ")", "_format", "=", "_format", ".", "replace", "(", "\"%s\"", ",", "\"{secs:02d}\"", ")", "_format", "=", "_format", ".", "replace", "(", "\"%i\"", ",", "\"{millis:03d}\"", ")", "try", ":", "i", "=", "_format", ".", "index", "(", "\"%\"", ")", "raise", "TimeFormatError", "(", "\"Unknow time format directive '{0}'\"", ".", "format", "(", "_format", "[", "i", ":", "i", "+", "2", "]", ")", ")", "except", "ValueError", ":", "pass", "def", "_fromatter", "(", "seconds", ")", ":", "millis", "=", "int", "(", "seconds", "*", "1000", ")", "hrs", ",", "millis", "=", "divmod", "(", "millis", ",", "3600000", ")", "mins", ",", "millis", "=", "divmod", "(", "millis", ",", "60000", ")", "secs", ",", "millis", "=", "divmod", "(", "millis", ",", "1000", ")", "return", "_format", ".", "format", "(", "hrs", "=", "hrs", ",", "mins", "=", "mins", ",", "secs", "=", "secs", ",", "millis", "=", "millis", ")", "return", "_fromatter" ]
32.029412
0.008021
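Usage of the factory above (its name kept as in the source, typo included); these outputs follow directly from the divmod cascade:

```python
fmt = seconds_to_str_fromatter("%h:%m:%s.%i")
print(fmt(3723.5))                            # 01:02:03.500

print(seconds_to_str_fromatter("%S")(7.5))    # 7.50 (total seconds)
print(seconds_to_str_fromatter("%I")(7.5))    # 7500 (total milliseconds)
```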
def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
[ "def", "select_as_coordinates", "(", "self", ",", "key", ",", "where", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "where", "=", "_ensure_term", "(", "where", ",", "scope_level", "=", "1", ")", "return", "self", ".", "get_storer", "(", "key", ")", ".", "read_coordinates", "(", "where", "=", "where", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "*", "*", "kwargs", ")" ]
41.4
0.00315
def has_files(the_path): """Given a path, returns whether the path has any files in it or any subfolders. Works recursively.""" the_path = Path(the_path) try: for _ in the_path.walkfiles(): return True return False except OSError as ex: if ex.errno == errno.ENOENT: # ignore return False else: raise
[ "def", "has_files", "(", "the_path", ")", ":", "the_path", "=", "Path", "(", "the_path", ")", "try", ":", "for", "_", "in", "the_path", ".", "walkfiles", "(", ")", ":", "return", "True", "return", "False", "except", "OSError", "as", "ex", ":", "if", "ex", ".", "errno", "==", "errno", ".", "ENOENT", ":", "# ignore", "return", "False", "else", ":", "raise" ]
29.461538
0.005063
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # this callback is used to prevent an auto-migration from being generated # when there are no changes to the schema # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html def process_revision_directives(context, revision, directives): if getattr(config.cmd_opts, 'autogenerate', False): script = directives[0] if len(script.upgrade_ops_list) >= len(bind_names) + 1: empty = True for upgrade_ops in script.upgrade_ops_list: if not upgrade_ops.is_empty(): empty = False if empty: directives[:] = [] logger.info('No changes in schema detected.') # for the direct-to-DB use case, start a transaction on all # engines, then run all migrations, then commit all transactions. engines = { '': { 'engine': engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool, ) } } for name in bind_names: engines[name] = rec = {} rec['engine'] = engine_from_config( context.config.get_section(name), prefix='sqlalchemy.', poolclass=pool.NullPool) for name, rec in engines.items(): engine = rec['engine'] rec['connection'] = conn = engine.connect() if USE_TWOPHASE: rec['transaction'] = conn.begin_twophase() else: rec['transaction'] = conn.begin() try: for name, rec in engines.items(): logger.info("Migrating database %s" % (name or '<default>')) context.configure( connection=rec['connection'], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=get_metadata(name), process_revision_directives=process_revision_directives, **current_app.extensions['migrate'].configure_args ) context.run_migrations(engine_name=name) if USE_TWOPHASE: for rec in engines.values(): rec['transaction'].prepare() for rec in engines.values(): rec['transaction'].commit() except: for rec in engines.values(): rec['transaction'].rollback() raise finally: for rec in engines.values(): rec['connection'].close()
[ "def", "run_migrations_online", "(", ")", ":", "# this callback is used to prevent an auto-migration from being generated", "# when there are no changes to the schema", "# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html", "def", "process_revision_directives", "(", "context", ",", "revision", ",", "directives", ")", ":", "if", "getattr", "(", "config", ".", "cmd_opts", ",", "'autogenerate'", ",", "False", ")", ":", "script", "=", "directives", "[", "0", "]", "if", "len", "(", "script", ".", "upgrade_ops_list", ")", ">=", "len", "(", "bind_names", ")", "+", "1", ":", "empty", "=", "True", "for", "upgrade_ops", "in", "script", ".", "upgrade_ops_list", ":", "if", "not", "upgrade_ops", ".", "is_empty", "(", ")", ":", "empty", "=", "False", "if", "empty", ":", "directives", "[", ":", "]", "=", "[", "]", "logger", ".", "info", "(", "'No changes in schema detected.'", ")", "# for the direct-to-DB use case, start a transaction on all", "# engines, then run all migrations, then commit all transactions.", "engines", "=", "{", "''", ":", "{", "'engine'", ":", "engine_from_config", "(", "config", ".", "get_section", "(", "config", ".", "config_ini_section", ")", ",", "prefix", "=", "'sqlalchemy.'", ",", "poolclass", "=", "pool", ".", "NullPool", ",", ")", "}", "}", "for", "name", "in", "bind_names", ":", "engines", "[", "name", "]", "=", "rec", "=", "{", "}", "rec", "[", "'engine'", "]", "=", "engine_from_config", "(", "context", ".", "config", ".", "get_section", "(", "name", ")", ",", "prefix", "=", "'sqlalchemy.'", ",", "poolclass", "=", "pool", ".", "NullPool", ")", "for", "name", ",", "rec", "in", "engines", ".", "items", "(", ")", ":", "engine", "=", "rec", "[", "'engine'", "]", "rec", "[", "'connection'", "]", "=", "conn", "=", "engine", ".", "connect", "(", ")", "if", "USE_TWOPHASE", ":", "rec", "[", "'transaction'", "]", "=", "conn", ".", "begin_twophase", "(", ")", "else", ":", "rec", "[", "'transaction'", "]", "=", "conn", ".", "begin", "(", ")", "try", ":", "for", "name", ",", "rec", "in", "engines", ".", "items", "(", ")", ":", "logger", ".", "info", "(", "\"Migrating database %s\"", "%", "(", "name", "or", "'<default>'", ")", ")", "context", ".", "configure", "(", "connection", "=", "rec", "[", "'connection'", "]", ",", "upgrade_token", "=", "\"%s_upgrades\"", "%", "name", ",", "downgrade_token", "=", "\"%s_downgrades\"", "%", "name", ",", "target_metadata", "=", "get_metadata", "(", "name", ")", ",", "process_revision_directives", "=", "process_revision_directives", ",", "*", "*", "current_app", ".", "extensions", "[", "'migrate'", "]", ".", "configure_args", ")", "context", ".", "run_migrations", "(", "engine_name", "=", "name", ")", "if", "USE_TWOPHASE", ":", "for", "rec", "in", "engines", ".", "values", "(", ")", ":", "rec", "[", "'transaction'", "]", ".", "prepare", "(", ")", "for", "rec", "in", "engines", ".", "values", "(", ")", ":", "rec", "[", "'transaction'", "]", ".", "commit", "(", ")", "except", ":", "for", "rec", "in", "engines", ".", "values", "(", ")", ":", "rec", "[", "'transaction'", "]", ".", "rollback", "(", ")", "raise", "finally", ":", "for", "rec", "in", "engines", ".", "values", "(", ")", ":", "rec", "[", "'connection'", "]", ".", "close", "(", ")" ]
34.934211
0.000733
def pause(self): """Change state to paused.""" if self.state == STATE_PLAYING: self._player.set_state(Gst.State.PAUSED) self.state = STATE_PAUSED
[ "def", "pause", "(", "self", ")", ":", "if", "self", ".", "state", "==", "STATE_PLAYING", ":", "self", ".", "_player", ".", "set_state", "(", "Gst", ".", "State", ".", "PAUSED", ")", "self", ".", "state", "=", "STATE_PAUSED" ]
36.2
0.010811
def _create_model_info_endpoint(self, path='/info/model'):
    """Create an endpoint to serve info GET requests."""
    model = self.model

    # parse model details
    model_details = {}
    for key, value in model.__dict__.items():
        model_details[key] = make_serializable(value)

    # create generic restful resource to serve model information as JSON
    class ModelInfo(Resource):
        @staticmethod
        def get():
            return model_details

    self.api.add_resource(ModelInfo, path)
    self.app.logger.info('Registered informational resource to {} (available via GET)'.format(path))
    self.app.logger.debug('Endpoint {} will now serve the following static data:\n{}'.format(path, model_details))
[ "def", "_create_model_info_endpoint", "(", "self", ",", "path", "=", "'/info/model'", ")", ":", "model", "=", "self", ".", "model", "# parse model details", "model_details", "=", "{", "}", "for", "key", ",", "value", "in", "model", ".", "__dict__", ".", "items", "(", ")", ":", "model_details", "[", "key", "]", "=", "make_serializable", "(", "value", ")", "# create generic restful resource to serve model information as JSON", "class", "ModelInfo", "(", "Resource", ")", ":", "@", "staticmethod", "def", "get", "(", ")", ":", "return", "model_details", "self", ".", "api", ".", "add_resource", "(", "ModelInfo", ",", "path", ")", "self", ".", "app", ".", "logger", ".", "info", "(", "'Regestered informational resource to {} (available via GET)'", ".", "format", "(", "path", ")", ")", "self", ".", "app", ".", "logger", ".", "debug", "(", "'Endpoint {} will now serve the following static data:\\n{}'", ".", "format", "(", "path", ",", "model_details", ")", ")" ]
42.555556
0.005109
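A hedged standalone version of the same pattern with Flask-RESTful, with a literal dict standing in for the serialised `model.__dict__`:

```python
from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

model_details = {"name": "demo", "version": 1}  # stands in for model.__dict__

class ModelInfo(Resource):
    @staticmethod
    def get():
        # Flask-RESTful serialises the returned dict to JSON
        return model_details

api.add_resource(ModelInfo, "/info/model")
# app.run()  # GET /info/model -> {"name": "demo", "version": 1}
```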
def _InsservExpander(self, facilities, val): """Expand insserv variables.""" expanded = [] if val.startswith("$"): vals = facilities.get(val, []) for v in vals: expanded.extend(self._InsservExpander(facilities, v)) elif val.startswith("+"): expanded.append(val[1:]) else: expanded.append(val) return expanded
[ "def", "_InsservExpander", "(", "self", ",", "facilities", ",", "val", ")", ":", "expanded", "=", "[", "]", "if", "val", ".", "startswith", "(", "\"$\"", ")", ":", "vals", "=", "facilities", ".", "get", "(", "val", ",", "[", "]", ")", "for", "v", "in", "vals", ":", "expanded", ".", "extend", "(", "self", ".", "_InsservExpander", "(", "facilities", ",", "v", ")", ")", "elif", "val", ".", "startswith", "(", "\"+\"", ")", ":", "expanded", ".", "append", "(", "val", "[", "1", ":", "]", ")", "else", ":", "expanded", ".", "append", "(", "val", ")", "return", "expanded" ]
29.416667
0.013736
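The recursion is easy to trace with a toy facilities table (the entries below are made up for illustration); a standalone copy of the logic above:

```python
facilities = {
    "$local_fs": ["+mountall", "+umountfs"],
    "$network": ["+networking", "$local_fs"],
}

def expand(facilities, val):
    expanded = []
    if val.startswith("$"):
        for v in facilities.get(val, []):
            expanded.extend(expand(facilities, v))  # recurse into the facility
    elif val.startswith("+"):
        expanded.append(val[1:])                    # strip the optional marker
    else:
        expanded.append(val)
    return expanded

print(expand(facilities, "$network"))
# ['networking', 'mountall', 'umountfs']
```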
def connection(self, shareable=True): """Get a steady, cached DB-API 2 connection from the pool. If shareable is set and the underlying DB-API 2 allows it, then the connection may be shared with other threads. """ if shareable and self._maxshared: self._lock.acquire() try: while (not self._shared_cache and self._maxconnections and self._connections >= self._maxconnections): self._wait_lock() if len(self._shared_cache) < self._maxshared: # shared cache is not full, get a dedicated connection try: # first try to get it from the idle cache con = self._idle_cache.pop(0) except IndexError: # else get a fresh connection con = self.steady_connection() else: con._ping_check() # check this connection con = SharedDBConnection(con) self._connections += 1 else: # shared cache full or no more connections allowed self._shared_cache.sort() # least shared connection first con = self._shared_cache.pop(0) # get it while con.con._transaction: # do not share connections which are in a transaction self._shared_cache.insert(0, con) self._wait_lock() self._shared_cache.sort() con = self._shared_cache.pop(0) con.con._ping_check() # check the underlying connection con.share() # increase share of this connection # put the connection (back) into the shared cache self._shared_cache.append(con) self._lock.notify() finally: self._lock.release() con = PooledSharedDBConnection(self, con) else: # try to get a dedicated connection self._lock.acquire() try: while (self._maxconnections and self._connections >= self._maxconnections): self._wait_lock() # connection limit not reached, get a dedicated connection try: # first try to get it from the idle cache con = self._idle_cache.pop(0) except IndexError: # else get a fresh connection con = self.steady_connection() else: con._ping_check() # check connection con = PooledDedicatedDBConnection(self, con) self._connections += 1 finally: self._lock.release() return con
[ "def", "connection", "(", "self", ",", "shareable", "=", "True", ")", ":", "if", "shareable", "and", "self", ".", "_maxshared", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "while", "(", "not", "self", ".", "_shared_cache", "and", "self", ".", "_maxconnections", "and", "self", ".", "_connections", ">=", "self", ".", "_maxconnections", ")", ":", "self", ".", "_wait_lock", "(", ")", "if", "len", "(", "self", ".", "_shared_cache", ")", "<", "self", ".", "_maxshared", ":", "# shared cache is not full, get a dedicated connection", "try", ":", "# first try to get it from the idle cache", "con", "=", "self", ".", "_idle_cache", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "# else get a fresh connection", "con", "=", "self", ".", "steady_connection", "(", ")", "else", ":", "con", ".", "_ping_check", "(", ")", "# check this connection", "con", "=", "SharedDBConnection", "(", "con", ")", "self", ".", "_connections", "+=", "1", "else", ":", "# shared cache full or no more connections allowed", "self", ".", "_shared_cache", ".", "sort", "(", ")", "# least shared connection first", "con", "=", "self", ".", "_shared_cache", ".", "pop", "(", "0", ")", "# get it", "while", "con", ".", "con", ".", "_transaction", ":", "# do not share connections which are in a transaction", "self", ".", "_shared_cache", ".", "insert", "(", "0", ",", "con", ")", "self", ".", "_wait_lock", "(", ")", "self", ".", "_shared_cache", ".", "sort", "(", ")", "con", "=", "self", ".", "_shared_cache", ".", "pop", "(", "0", ")", "con", ".", "con", ".", "_ping_check", "(", ")", "# check the underlying connection", "con", ".", "share", "(", ")", "# increase share of this connection", "# put the connection (back) into the shared cache", "self", ".", "_shared_cache", ".", "append", "(", "con", ")", "self", ".", "_lock", ".", "notify", "(", ")", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")", "con", "=", "PooledSharedDBConnection", "(", "self", ",", "con", ")", "else", ":", "# try to get a dedicated connection", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "while", "(", "self", ".", "_maxconnections", "and", "self", ".", "_connections", ">=", "self", ".", "_maxconnections", ")", ":", "self", ".", "_wait_lock", "(", ")", "# connection limit not reached, get a dedicated connection", "try", ":", "# first try to get it from the idle cache", "con", "=", "self", ".", "_idle_cache", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "# else get a fresh connection", "con", "=", "self", ".", "steady_connection", "(", ")", "else", ":", "con", ".", "_ping_check", "(", ")", "# check connection", "con", "=", "PooledDedicatedDBConnection", "(", "self", ",", "con", ")", "self", ".", "_connections", "+=", "1", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")", "return", "con" ]
48.362069
0.000699
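This is the `connection` method of DBUtils' PooledDB; a hedged usage sketch (the import path matches DBUtils 2.x, older releases use `from DBUtils.PooledDB import PooledDB`):

```python
import sqlite3
from dbutils.pooled_db import PooledDB

# sqlite3 reports threadsafety < 2, so the pool silently falls back to
# dedicated connections; maxshared only takes effect with safer drivers
pool = PooledDB(creator=sqlite3, database=":memory:",
                maxconnections=5, maxshared=3)

con = pool.connection()   # shareable=True by default
cur = con.cursor()
cur.execute("SELECT 1")
print(cur.fetchone())     # (1,)
cur.close()
con.close()               # returns the connection to the pool
```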
def logger(*args, **kwargs): """ shortcut to :func:`utils.get_basic_logger` """ if mpi.within_mpirun and mpi.myrank == 0: # tell the workers to invoke the same logger mpi.comm.bcast({'worker_command': 'logger', 'args': args, 'kwargs': kwargs}, root=0) return _utils.get_basic_logger(*args, **kwargs)
[ "def", "logger", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "mpi", ".", "within_mpirun", "and", "mpi", ".", "myrank", "==", "0", ":", "# tell the workers to invoke the same logger", "mpi", ".", "comm", ".", "bcast", "(", "{", "'worker_command'", ":", "'logger'", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", "}", ",", "root", "=", "0", ")", "return", "_utils", ".", "get_basic_logger", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
36.444444
0.005952
def _cleanup(self): """Remove the connection from the stack, closing out the cursor""" if self._cursor: LOGGER.debug('Closing the cursor on %s', self.pid) self._cursor.close() self._cursor = None if self._conn: LOGGER.debug('Freeing %s in the pool', self.pid) try: pool.PoolManager.instance().free(self.pid, self._conn) except pool.ConnectionNotFoundError: pass self._conn = None
[ "def", "_cleanup", "(", "self", ")", ":", "if", "self", ".", "_cursor", ":", "LOGGER", ".", "debug", "(", "'Closing the cursor on %s'", ",", "self", ".", "pid", ")", "self", ".", "_cursor", ".", "close", "(", ")", "self", ".", "_cursor", "=", "None", "if", "self", ".", "_conn", ":", "LOGGER", ".", "debug", "(", "'Freeing %s in the pool'", ",", "self", ".", "pid", ")", "try", ":", "pool", ".", "PoolManager", ".", "instance", "(", ")", ".", "free", "(", "self", ".", "pid", ",", "self", ".", "_conn", ")", "except", "pool", ".", "ConnectionNotFoundError", ":", "pass", "self", ".", "_conn", "=", "None" ]
36.214286
0.003846
def init_app(self, app, conf_key=None):
    """
    :type app: flask.Flask
    :param str conf_key: Key of flask config.
    """
    conf_key = conf_key or self.conf_key or 'PYMEMCACHE'
    self.conf_key = conf_key
    conf = app.config[conf_key]
    if not isinstance(conf, dict):
        raise TypeError("Flask-PyMemcache conf should be dict")

    close_on_teardown = conf.pop('close_on_teardown', False)

    if isinstance(conf['server'], list):
        conf['servers'] = conf.pop('server')
        client = pymemcache.client.hash.HashClient(**conf)
    elif isinstance(conf['server'], tuple):
        client = pymemcache.client.Client(**conf)
    else:
        raise TypeError("Flask-PyMemcache conf['server'] should be tuple or list of tuples")

    app.extensions.setdefault('pymemcache', {})
    app.extensions['pymemcache'][self] = client

    if close_on_teardown:
        @app.teardown_appcontext
        def close_connection(exc=None):
            client.close()
[ "def", "init_app", "(", "self", ",", "app", ",", "conf_key", "=", "None", ")", ":", "conf_key", "=", "conf_key", "or", "self", ".", "conf_key", "or", "'PYMEMCACHE'", "self", ".", "conf_key", "=", "conf_key", "conf", "=", "app", ".", "config", "[", "conf_key", "]", "if", "not", "isinstance", "(", "conf", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"Flask-PyMemcache conf should be dict\"", ")", "close_on_teardown", "=", "conf", ".", "pop", "(", "'close_on_teardown'", ",", "False", ")", "if", "isinstance", "(", "conf", "[", "'server'", "]", ",", "list", ")", ":", "conf", "[", "'servers'", "]", "=", "conf", ".", "pop", "(", "'server'", ")", "client", "=", "pymemcache", ".", "client", ".", "hash", ".", "HashClient", "(", "*", "*", "conf", ")", "elif", "isinstance", "(", "conf", "[", "'server'", "]", ",", "tuple", ")", ":", "client", "=", "pymemcache", ".", "client", ".", "Client", "(", "*", "*", "conf", ")", "else", ":", "raise", "TypeError", "(", "\"Flask-PyMemcache conf['server'] should be tuple or list of tuples\"", ")", "app", ".", "extensions", ".", "setdefault", "(", "'pymemcache'", ",", "{", "}", ")", "app", ".", "extensions", "[", "'pymemcache'", "]", "[", "self", "]", "=", "client", "if", "close_on_teardown", ":", "@", "app", ".", "teardown_appcontext", "def", "close_connection", "(", "exc", "=", "None", ")", ":", "client", ".", "close", "(", ")" ]
37.071429
0.002817
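A hedged configuration sketch for the method above; the `FlaskPyMemcache` class name and the local memcached address are assumptions:

```python
from flask import Flask
from flask_pymemcache import FlaskPyMemcache  # assumed package/class name

app = Flask(__name__)
app.config['PYMEMCACHE'] = {
    'server': ('localhost', 11211),  # a tuple selects a single pymemcache Client
    'connect_timeout': 1.0,          # remaining keys go to the client constructor
    'close_on_teardown': True,       # popped by init_app before construction
}

memcache = FlaskPyMemcache()
memcache.init_app(app)
client = app.extensions['pymemcache'][memcache]
```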
def findExtname(fimg, extname, extver=None):
    """
    Returns the index of the extension whose EXTNAME matches the given
    extname (and EXTVER, if specified), or None if no match is found.
    """
    i = 0
    extnum = None
    for chip in fimg:
        hdr = chip.header
        if 'EXTNAME' in hdr:
            if hdr['EXTNAME'].strip() == extname.upper():
                if extver is None or hdr['EXTVER'] == extver:
                    extnum = i
                    break
        i += 1
    return extnum
[ "def", "findExtname", "(", "fimg", ",", "extname", ",", "extver", "=", "None", ")", ":", "i", "=", "0", "extnum", "=", "None", "for", "chip", "in", "fimg", ":", "hdr", "=", "chip", ".", "header", "if", "'EXTNAME'", "in", "hdr", ":", "if", "hdr", "[", "'EXTNAME'", "]", ".", "strip", "(", ")", "==", "extname", ".", "upper", "(", ")", ":", "if", "extver", "is", "None", "or", "hdr", "[", "'EXTVER'", "]", "==", "extver", ":", "extnum", "=", "i", "break", "i", "+=", "1", "return", "extnum" ]
27.375
0.002208
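Assuming `findExtname` from the record above is in scope, a hedged call with astropy (`image.fits` is a placeholder path):

```python
from astropy.io import fits

with fits.open("image.fits") as fimg:        # placeholder path
    n = findExtname(fimg, "sci", extver=1)   # extname is upper-cased inside
    if n is not None:
        print(n, fimg[n].header.get("EXTVER"))
```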
def tags_impl(self): """Creates the JSON object for the tags route response. Returns: The JSON object for the tags route response. """ if self._db_connection_provider: # Read tags from the database. db = self._db_connection_provider() cursor = db.execute(''' SELECT Tags.tag_name, Tags.display_name, Runs.run_name FROM Tags JOIN Runs ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? ''', (metadata.PLUGIN_NAME,)) result = {} for (tag_name, display_name, run_name) in cursor: if run_name not in result: result[run_name] = {} result[run_name][tag_name] = { 'displayName': display_name, # TODO(chihuahua): Populate the description. Currently, the tags # table does not link with the description table. 'description': '', } else: # Read tags from events files. runs = self._multiplexer.Runs() result = {run: {} for run in runs} mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) for (run, tag_to_content) in six.iteritems(mapping): for (tag, _) in six.iteritems(tag_to_content): summary_metadata = self._multiplexer.SummaryMetadata(run, tag) result[run][tag] = {'displayName': summary_metadata.display_name, 'description': plugin_util.markdown_to_safe_html( summary_metadata.summary_description)} return result
[ "def", "tags_impl", "(", "self", ")", ":", "if", "self", ".", "_db_connection_provider", ":", "# Read tags from the database.", "db", "=", "self", ".", "_db_connection_provider", "(", ")", "cursor", "=", "db", ".", "execute", "(", "'''\n SELECT\n Tags.tag_name,\n Tags.display_name,\n Runs.run_name\n FROM Tags\n JOIN Runs\n ON Tags.run_id = Runs.run_id\n WHERE\n Tags.plugin_name = ?\n '''", ",", "(", "metadata", ".", "PLUGIN_NAME", ",", ")", ")", "result", "=", "{", "}", "for", "(", "tag_name", ",", "display_name", ",", "run_name", ")", "in", "cursor", ":", "if", "run_name", "not", "in", "result", ":", "result", "[", "run_name", "]", "=", "{", "}", "result", "[", "run_name", "]", "[", "tag_name", "]", "=", "{", "'displayName'", ":", "display_name", ",", "# TODO(chihuahua): Populate the description. Currently, the tags", "# table does not link with the description table.", "'description'", ":", "''", ",", "}", "else", ":", "# Read tags from events files.", "runs", "=", "self", ".", "_multiplexer", ".", "Runs", "(", ")", "result", "=", "{", "run", ":", "{", "}", "for", "run", "in", "runs", "}", "mapping", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "metadata", ".", "PLUGIN_NAME", ")", "for", "(", "run", ",", "tag_to_content", ")", "in", "six", ".", "iteritems", "(", "mapping", ")", ":", "for", "(", "tag", ",", "_", ")", "in", "six", ".", "iteritems", "(", "tag_to_content", ")", ":", "summary_metadata", "=", "self", ".", "_multiplexer", ".", "SummaryMetadata", "(", "run", ",", "tag", ")", "result", "[", "run", "]", "[", "tag", "]", "=", "{", "'displayName'", ":", "summary_metadata", ".", "display_name", ",", "'description'", ":", "plugin_util", ".", "markdown_to_safe_html", "(", "summary_metadata", ".", "summary_description", ")", "}", "return", "result" ]
35.181818
0.008799
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only process if stats exist and plugin not disabled if not self.stats or self.is_disable(): return ret # Build the string message # Header msg = '{}'.format('MEM') ret.append(self.curse_add_line(msg, "TITLE")) msg = ' {:2}'.format(self.trend_msg(self.get_trend('percent'))) ret.append(self.curse_add_line(msg)) # Percent memory usage msg = '{:>7.1%}'.format(self.stats['percent'] / 100) ret.append(self.curse_add_line(msg)) # Active memory usage if 'active' in self.stats: msg = ' {:9}'.format('active:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['active'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional'))) # New line ret.append(self.curse_new_line()) # Total memory usage msg = '{:6}'.format('total:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['total'])) ret.append(self.curse_add_line(msg)) # Inactive memory usage if 'inactive' in self.stats: msg = ' {:9}'.format('inactive:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['inactive'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional'))) # New line ret.append(self.curse_new_line()) # Used memory usage msg = '{:6}'.format('used:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['used'])) ret.append(self.curse_add_line( msg, self.get_views(key='used', option='decoration'))) # Buffers memory usage if 'buffers' in self.stats: msg = ' {:9}'.format('buffers:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['buffers'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional'))) # New line ret.append(self.curse_new_line()) # Free memory usage msg = '{:6}'.format('free:') ret.append(self.curse_add_line(msg)) msg = '{:>7}'.format(self.auto_unit(self.stats['free'])) ret.append(self.curse_add_line(msg)) # Cached memory usage if 'cached' in self.stats: msg = ' {:9}'.format('cached:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional'))) msg = '{:>7}'.format(self.auto_unit(self.stats['cached'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional'))) return ret
[ "def", "msg_curse", "(", "self", ",", "args", "=", "None", ",", "max_width", "=", "None", ")", ":", "# Init the return message", "ret", "=", "[", "]", "# Only process if stats exist and plugin not disabled", "if", "not", "self", ".", "stats", "or", "self", ".", "is_disable", "(", ")", ":", "return", "ret", "# Build the string message", "# Header", "msg", "=", "'{}'", ".", "format", "(", "'MEM'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "\"TITLE\"", ")", ")", "msg", "=", "' {:2}'", ".", "format", "(", "self", ".", "trend_msg", "(", "self", ".", "get_trend", "(", "'percent'", ")", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Percent memory usage", "msg", "=", "'{:>7.1%}'", ".", "format", "(", "self", ".", "stats", "[", "'percent'", "]", "/", "100", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Active memory usage", "if", "'active'", "in", "self", ".", "stats", ":", "msg", "=", "' {:9}'", ".", "format", "(", "'active:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'active'", ",", "option", "=", "'optional'", ")", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'active'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'active'", ",", "option", "=", "'optional'", ")", ")", ")", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# Total memory usage", "msg", "=", "'{:6}'", ".", "format", "(", "'total:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'total'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Inactive memory usage", "if", "'inactive'", "in", "self", ".", "stats", ":", "msg", "=", "' {:9}'", ".", "format", "(", "'inactive:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'inactive'", ",", "option", "=", "'optional'", ")", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'inactive'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'inactive'", ",", "option", "=", "'optional'", ")", ")", ")", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# Used memory usage", "msg", "=", "'{:6}'", ".", "format", "(", "'used:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'used'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "self", ".", "get_views", "(", "key", "=", "'used'", ",", "option", "=", "'decoration'", ")", ")", ")", "# Buffers memory usage", "if", "'buffers'", "in", "self", ".", "stats", ":", "msg", "=", "' {:9}'", ".", "format", "(", "'buffers:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'buffers'", 
",", "option", "=", "'optional'", ")", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'buffers'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'buffers'", ",", "option", "=", "'optional'", ")", ")", ")", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# Free memory usage", "msg", "=", "'{:6}'", ".", "format", "(", "'free:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'free'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Cached memory usage", "if", "'cached'", "in", "self", ".", "stats", ":", "msg", "=", "' {:9}'", ".", "format", "(", "'cached:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'cached'", ",", "option", "=", "'optional'", ")", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "stats", "[", "'cached'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "self", ".", "get_views", "(", "key", "=", "'cached'", ",", "option", "=", "'optional'", ")", ")", ")", "return", "ret" ]
47.757576
0.003108
def uninstall_all_passbands(local=True): """ Uninstall all passbands, either globally or locally (need to call twice to delete ALL passbands) If local=False, you must have permission to access the installation directory """ pbdir = _pbdir_local if local else _pbdir_global for f in os.listdir(pbdir): pbpath = os.path.join(pbdir, f) logger.warning("deleting file: {}".format(pbpath)) os.remove(pbpath)
[ "def", "uninstall_all_passbands", "(", "local", "=", "True", ")", ":", "pbdir", "=", "_pbdir_local", "if", "local", "else", "_pbdir_global", "for", "f", "in", "os", ".", "listdir", "(", "pbdir", ")", ":", "pbpath", "=", "os", ".", "path", ".", "join", "(", "pbdir", ",", "f", ")", "logger", ".", "warning", "(", "\"deleting file: {}\"", ".", "format", "(", "pbpath", ")", ")", "os", ".", "remove", "(", "pbpath", ")" ]
36.916667
0.004405
def get_canonical_correlations(dataframe, column_types): ''' computes the correlation coefficient between each distinct pairing of columns preprocessing note: any rows with missing values (in either paired column) are dropped for that pairing categorical columns are replaced with one-hot encoded columns any columns which have only one distinct value (after dropping missing values) are skipped returns a list of the pairwise canonical correlation coefficients ''' def preprocess(series): if column_types[series.name] == 'CATEGORICAL': series = pd.get_dummies(series) array = series.values.reshape(series.shape[0], -1) return array if dataframe.shape[1] < 2: return [] correlations = [] skip_cols = set() for col_name_i, col_name_j in itertools.combinations(dataframe.columns, 2): if col_name_i in skip_cols or col_name_j in skip_cols: correlations.append(0) continue df_ij = dataframe[[col_name_i, col_name_j]].dropna(axis=0, how="any") col_i = df_ij[col_name_i] col_j = df_ij[col_name_j] if np.unique(col_i).shape[0] <= 1: skip_cols.add(col_name_i) correlations.append(0) continue if np.unique(col_j).shape[0] <= 1: skip_cols.add(col_name_j) correlations.append(0) continue col_i = preprocess(col_i) col_j = preprocess(col_j) col_i_c, col_j_c = CCA(n_components=1).fit_transform(col_i,col_j) if np.unique(col_i_c).shape[0] <= 1 or np.unique(col_j_c).shape[0] <= 1: c = 0 else: c = np.corrcoef(col_i_c.T, col_j_c.T)[0,1] correlations.append(c) return correlations
[ "def", "get_canonical_correlations", "(", "dataframe", ",", "column_types", ")", ":", "def", "preprocess", "(", "series", ")", ":", "if", "column_types", "[", "series", ".", "name", "]", "==", "'CATEGORICAL'", ":", "series", "=", "pd", ".", "get_dummies", "(", "series", ")", "array", "=", "series", ".", "values", ".", "reshape", "(", "series", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "return", "array", "if", "dataframe", ".", "shape", "[", "1", "]", "<", "2", ":", "return", "[", "]", "correlations", "=", "[", "]", "skip_cols", "=", "set", "(", ")", "for", "col_name_i", ",", "col_name_j", "in", "itertools", ".", "combinations", "(", "dataframe", ".", "columns", ",", "2", ")", ":", "if", "col_name_i", "in", "skip_cols", "or", "col_name_j", "in", "skip_cols", ":", "correlations", ".", "append", "(", "0", ")", "continue", "df_ij", "=", "dataframe", "[", "[", "col_name_i", ",", "col_name_j", "]", "]", ".", "dropna", "(", "axis", "=", "0", ",", "how", "=", "\"any\"", ")", "col_i", "=", "df_ij", "[", "col_name_i", "]", "col_j", "=", "df_ij", "[", "col_name_j", "]", "if", "np", ".", "unique", "(", "col_i", ")", ".", "shape", "[", "0", "]", "<=", "1", ":", "skip_cols", ".", "add", "(", "col_name_i", ")", "correlations", ".", "append", "(", "0", ")", "continue", "if", "np", ".", "unique", "(", "col_j", ")", ".", "shape", "[", "0", "]", "<=", "1", ":", "skip_cols", ".", "add", "(", "col_name_j", ")", "correlations", ".", "append", "(", "0", ")", "continue", "col_i", "=", "preprocess", "(", "col_i", ")", "col_j", "=", "preprocess", "(", "col_j", ")", "col_i_c", ",", "col_j_c", "=", "CCA", "(", "n_components", "=", "1", ")", ".", "fit_transform", "(", "col_i", ",", "col_j", ")", "if", "np", ".", "unique", "(", "col_i_c", ")", ".", "shape", "[", "0", "]", "<=", "1", "or", "np", ".", "unique", "(", "col_j_c", ")", ".", "shape", "[", "0", "]", "<=", "1", ":", "c", "=", "0", "else", ":", "c", "=", "np", ".", "corrcoef", "(", "col_i_c", ".", "T", ",", "col_j_c", ".", "T", ")", "[", "0", ",", "1", "]", "correlations", ".", "append", "(", "c", ")", "return", "correlations" ]
34.352941
0.003885
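Assuming the function above and its imports (numpy, pandas, itertools, sklearn's CCA) are in scope, a small call with one correlated numeric pair and one categorical column; only the 'CATEGORICAL' label is special, other type labels are arbitrary. The coefficients come back in `itertools.combinations` order:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
x = rng.normal(size=200)
df = pd.DataFrame({
    "a": x,
    "b": x + 0.1 * rng.normal(size=200),          # strongly tied to "a"
    "c": rng.choice(["red", "green"], size=200),  # unrelated categorical
})
column_types = {"a": "NUMERIC", "b": "NUMERIC", "c": "CATEGORICAL"}

# one coefficient per pair: (a, b), (a, c), (b, c)
print(get_canonical_correlations(df, column_types))
```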
def find_nested_meta_first(d, prop_name, version):
    """Returns the object for badgerfish and the value for honeybadgerfish (hbf).
    Appropriate for nested literals."""
    if _is_badgerfish_version(version):
        return find_nested_meta_first_bf(d, prop_name)
    p = '^' + prop_name
    return d.get(p)
[ "def", "find_nested_meta_first", "(", "d", ",", "prop_name", ",", "version", ")", ":", "if", "_is_badgerfish_version", "(", "version", ")", ":", "return", "find_nested_meta_first_bf", "(", "d", ",", "prop_name", ")", "p", "=", "'^'", "+", "prop_name", "return", "d", ".", "get", "(", "p", ")" ]
45.166667
0.007246
def force_auto(service, _type): """ Helper for forcing autoserialization of a datatype with already registered explicit storable instance. Arguments: service (StorableService): active storable service. _type (type): type to be autoserialized. **Not tested** """ storable = service.byPythonType(_type, istype=True) version = max(handler.version[0] for handler in storable.handlers) + 1 _storable = default_storable(_type, version=(version, )) storable.handlers.append(_storable.handlers[0])
[ "def", "force_auto", "(", "service", ",", "_type", ")", ":", "storable", "=", "service", ".", "byPythonType", "(", "_type", ",", "istype", "=", "True", ")", "version", "=", "max", "(", "handler", ".", "version", "[", "0", "]", "for", "handler", "in", "storable", ".", "handlers", ")", "+", "1", "_storable", "=", "default_storable", "(", "_type", ",", "version", "=", "(", "version", ",", ")", ")", "storable", ".", "handlers", ".", "append", "(", "_storable", ".", "handlers", "[", "0", "]", ")" ]
31.352941
0.003643
def get_scheme_dirs(): """Return a set of all scheme directories.""" scheme_glob = rel_to_cwd('schemes', '**', '*.yaml') scheme_groups = glob(scheme_glob) scheme_groups = [get_parent_dir(path) for path in scheme_groups] return set(scheme_groups)
[ "def", "get_scheme_dirs", "(", ")", ":", "scheme_glob", "=", "rel_to_cwd", "(", "'schemes'", ",", "'**'", ",", "'*.yaml'", ")", "scheme_groups", "=", "glob", "(", "scheme_glob", ")", "scheme_groups", "=", "[", "get_parent_dir", "(", "path", ")", "for", "path", "in", "scheme_groups", "]", "return", "set", "(", "scheme_groups", ")" ]
43.333333
0.003774
def getTzid(tzid, smart=True): """Return the tzid if it exists, or None.""" tz = __tzidMap.get(toUnicode(tzid), None) if smart and tzid and not tz: try: from pytz import timezone, UnknownTimeZoneError try: tz = timezone(tzid) registerTzid(toUnicode(tzid), tz) except UnknownTimeZoneError: pass except ImportError: pass return tz
[ "def", "getTzid", "(", "tzid", ",", "smart", "=", "True", ")", ":", "tz", "=", "__tzidMap", ".", "get", "(", "toUnicode", "(", "tzid", ")", ",", "None", ")", "if", "smart", "and", "tzid", "and", "not", "tz", ":", "try", ":", "from", "pytz", "import", "timezone", ",", "UnknownTimeZoneError", "try", ":", "tz", "=", "timezone", "(", "tzid", ")", "registerTzid", "(", "toUnicode", "(", "tzid", ")", ",", "tz", ")", "except", "UnknownTimeZoneError", ":", "pass", "except", "ImportError", ":", "pass", "return", "tz" ]
31.642857
0.002193
def set_structure(self, lattice, species, coords, coords_are_cartesian):
    """
    Sets up the pymatgen structure for which the coordination geometries have to be identified, starting from the
    lattice, the species and the coordinates
    :param lattice: The lattice of the structure
    :param species: The species on the sites
    :param coords: The coordinates of the sites
    :param coords_are_cartesian: If set to True, the coordinates are given in Cartesian coordinates
    """
    self.setup_structure(
        Structure(lattice, species, coords, coords_are_cartesian))
[ "def", "set_structure", "(", "self", ",", "lattice", ",", "species", ",", "coords", ",", "coords_are_cartesian", ")", ":", "self", ".", "setup_structure", "(", "Structure", "(", "lattice", ",", "species", ",", "coords", ",", "coords_are_cartesian", ")", ")" ]
55.545455
0.006441
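The method simply forwards its arguments to pymatgen's Structure constructor; a hedged sketch of those arguments (the modern `pymatgen.core` import path is assumed):

```python
from pymatgen.core import Lattice, Structure

lattice = Lattice.cubic(4.2)
species = ["Na", "Cl"]
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]  # fractional coordinates

# set_structure(lattice, species, coords, coords_are_cartesian=False)
# wraps this constructor call before handing off to setup_structure
structure = Structure(lattice, species, coords, coords_are_cartesian=False)
print(structure.composition)  # Na1 Cl1
```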
def build_search(self):
    """Builds the Elasticsearch search body represented by this S.

    Loop over self.steps to build the search body that will be sent
    to Elasticsearch. This returns a Python dict. If you want the
    JSON that actually gets sent, then pass the return value through
    :py:func:`elasticutils.utils.to_json`.

    :returns: a Python dict

    """
    filters = []
    filters_raw = None
    queries = []
    query_raw = None
    sort = []
    dict_fields = set()
    list_fields = set()
    facets = {}
    facets_raw = {}
    demote = None
    highlight_fields = set()
    highlight_options = {}
    suggestions = {}
    explain = False
    as_list = as_dict = False
    search_type = None

    for action, value in self.steps:
        if action == 'order_by':
            sort = []
            for key in value:
                if isinstance(key, string_types) and key.startswith('-'):
                    sort.append({key[1:]: 'desc'})
                else:
                    sort.append(key)
        elif action == 'values_list':
            if not value:
                list_fields = set()
            else:
                list_fields |= set(value)
            as_list, as_dict = True, False
        elif action == 'values_dict':
            if not value:
                dict_fields = set()
            else:
                dict_fields |= set(value)
            as_list, as_dict = False, True
        elif action == 'explain':
            explain = value
        elif action == 'query':
            queries.append(value)
        elif action == 'query_raw':
            query_raw = value
        elif action == 'demote':
            # value here is a tuple of (negative_boost, query)
            demote = value
        elif action == 'filter':
            filters.extend(self._process_filters(value))
        elif action == 'filter_raw':
            filters_raw = value
        elif action == 'facet':
            # value here is a (args, kwargs) tuple
            facets.update(_process_facets(*value))
        elif action == 'facet_raw':
            facets_raw.update(dict(value))
        elif action == 'highlight':
            if value[0] == (None,):
                highlight_fields = set()
            else:
                highlight_fields |= set(value[0])
            highlight_options.update(value[1])
        elif action == 'search_type':
            search_type = value
        elif action == 'suggest':
            suggestions[value[0]] = (value[1], value[2])
        elif action in ('es', 'indexes', 'doctypes', 'boost'):
            # Ignore these--we use these elsewhere, but want to
            # make sure lack of handling it here doesn't throw an
            # error.
            pass
        else:
            raise NotImplementedError(action)

    qs = {}

    # If there's a filters_raw, we use that.
    if filters_raw:
        qs['filter'] = filters_raw
    else:
        if len(filters) > 1:
            qs['filter'] = {'and': filters}
        elif filters:
            qs['filter'] = filters[0]

    # If there's a query_raw, we use that. Otherwise we use
    # whatever we got from query and demote.
    if query_raw:
        qs['query'] = query_raw
    else:
        pq = self._process_queries(queries)

        if demote is not None:
            qs['query'] = {
                'boosting': {
                    'negative': self._process_queries([demote[1]]),
                    'negative_boost': demote[0]
                }
            }
            if pq:
                qs['query']['boosting']['positive'] = pq

        elif pq:
            qs['query'] = pq

    if as_list:
        fields = qs['fields'] = list(list_fields) if list_fields else ['*']
    elif as_dict:
        fields = qs['fields'] = list(dict_fields) if dict_fields else ['*']
    else:
        fields = set()

    if facets:
        qs['facets'] = facets
        # Hunt for `facet_filter` shells and update those. We use
        # None as a shell, so if it's explicitly set to None, then
        # we update it.
        for facet in facets.values():
            if facet.get('facet_filter', 1) is None and 'filter' in qs:
                facet['facet_filter'] = qs['filter']

    if facets_raw:
        qs.setdefault('facets', {}).update(facets_raw)

    if sort:
        qs['sort'] = sort
    if self.start:
        qs['from'] = self.start
    if self.stop is not None:
        qs['size'] = self.stop - self.start

    if highlight_fields:
        qs['highlight'] = self._build_highlight(
            highlight_fields, highlight_options)

    if explain:
        qs['explain'] = True

    for suggestion, (term, kwargs) in six.iteritems(suggestions):
        qs.setdefault('suggest', {})[suggestion] = {
            'text': term,
            'term': {
                'field': kwargs.get('field', '_all'),
            },
        }

    self.fields, self.as_list, self.as_dict = fields, as_list, as_dict
    self.search_type = search_type
    return qs
[ "def", "build_search", "(", "self", ")", ":", "filters", "=", "[", "]", "filters_raw", "=", "None", "queries", "=", "[", "]", "query_raw", "=", "None", "sort", "=", "[", "]", "dict_fields", "=", "set", "(", ")", "list_fields", "=", "set", "(", ")", "facets", "=", "{", "}", "facets_raw", "=", "{", "}", "demote", "=", "None", "highlight_fields", "=", "set", "(", ")", "highlight_options", "=", "{", "}", "suggestions", "=", "{", "}", "explain", "=", "False", "as_list", "=", "as_dict", "=", "False", "search_type", "=", "None", "for", "action", ",", "value", "in", "self", ".", "steps", ":", "if", "action", "==", "'order_by'", ":", "sort", "=", "[", "]", "for", "key", "in", "value", ":", "if", "isinstance", "(", "key", ",", "string_types", ")", "and", "key", ".", "startswith", "(", "'-'", ")", ":", "sort", ".", "append", "(", "{", "key", "[", "1", ":", "]", ":", "'desc'", "}", ")", "else", ":", "sort", ".", "append", "(", "key", ")", "elif", "action", "==", "'values_list'", ":", "if", "not", "value", ":", "list_fields", "=", "set", "(", ")", "else", ":", "list_fields", "|=", "set", "(", "value", ")", "as_list", ",", "as_dict", "=", "True", ",", "False", "elif", "action", "==", "'values_dict'", ":", "if", "not", "value", ":", "dict_fields", "=", "set", "(", ")", "else", ":", "dict_fields", "|=", "set", "(", "value", ")", "as_list", ",", "as_dict", "=", "False", ",", "True", "elif", "action", "==", "'explain'", ":", "explain", "=", "value", "elif", "action", "==", "'query'", ":", "queries", ".", "append", "(", "value", ")", "elif", "action", "==", "'query_raw'", ":", "query_raw", "=", "value", "elif", "action", "==", "'demote'", ":", "# value here is a tuple of (negative_boost, query)", "demote", "=", "value", "elif", "action", "==", "'filter'", ":", "filters", ".", "extend", "(", "self", ".", "_process_filters", "(", "value", ")", ")", "elif", "action", "==", "'filter_raw'", ":", "filters_raw", "=", "value", "elif", "action", "==", "'facet'", ":", "# value here is a (args, kwargs) tuple", "facets", ".", "update", "(", "_process_facets", "(", "*", "value", ")", ")", "elif", "action", "==", "'facet_raw'", ":", "facets_raw", ".", "update", "(", "dict", "(", "value", ")", ")", "elif", "action", "==", "'highlight'", ":", "if", "value", "[", "0", "]", "==", "(", "None", ",", ")", ":", "highlight_fields", "=", "set", "(", ")", "else", ":", "highlight_fields", "|=", "set", "(", "value", "[", "0", "]", ")", "highlight_options", ".", "update", "(", "value", "[", "1", "]", ")", "elif", "action", "==", "'search_type'", ":", "search_type", "=", "value", "elif", "action", "==", "'suggest'", ":", "suggestions", "[", "value", "[", "0", "]", "]", "=", "(", "value", "[", "1", "]", ",", "value", "[", "2", "]", ")", "elif", "action", "in", "(", "'es'", ",", "'indexes'", ",", "'doctypes'", ",", "'boost'", ")", ":", "# Ignore these--we use these elsewhere, but want to", "# make sure lack of handling it here doesn't throw an", "# error.", "pass", "else", ":", "raise", "NotImplementedError", "(", "action", ")", "qs", "=", "{", "}", "# If there's a filters_raw, we use that.", "if", "filters_raw", ":", "qs", "[", "'filter'", "]", "=", "filters_raw", "else", ":", "if", "len", "(", "filters", ")", ">", "1", ":", "qs", "[", "'filter'", "]", "=", "{", "'and'", ":", "filters", "}", "elif", "filters", ":", "qs", "[", "'filter'", "]", "=", "filters", "[", "0", "]", "# If there's a query_raw, we use that. 
Otherwise we use", "# whatever we got from query and demote.", "if", "query_raw", ":", "qs", "[", "'query'", "]", "=", "query_raw", "else", ":", "pq", "=", "self", ".", "_process_queries", "(", "queries", ")", "if", "demote", "is", "not", "None", ":", "qs", "[", "'query'", "]", "=", "{", "'boosting'", ":", "{", "'negative'", ":", "self", ".", "_process_queries", "(", "[", "demote", "[", "1", "]", "]", ")", ",", "'negative_boost'", ":", "demote", "[", "0", "]", "}", "}", "if", "pq", ":", "qs", "[", "'query'", "]", "[", "'boosting'", "]", "[", "'positive'", "]", "=", "pq", "elif", "pq", ":", "qs", "[", "'query'", "]", "=", "pq", "if", "as_list", ":", "fields", "=", "qs", "[", "'fields'", "]", "=", "list", "(", "list_fields", ")", "if", "list_fields", "else", "[", "'*'", "]", "elif", "as_dict", ":", "fields", "=", "qs", "[", "'fields'", "]", "=", "list", "(", "dict_fields", ")", "if", "dict_fields", "else", "[", "'*'", "]", "else", ":", "fields", "=", "set", "(", ")", "if", "facets", ":", "qs", "[", "'facets'", "]", "=", "facets", "# Hunt for `facet_filter` shells and update those. We use", "# None as a shell, so if it's explicitly set to None, then", "# we update it.", "for", "facet", "in", "facets", ".", "values", "(", ")", ":", "if", "facet", ".", "get", "(", "'facet_filter'", ",", "1", ")", "is", "None", "and", "'filter'", "in", "qs", ":", "facet", "[", "'facet_filter'", "]", "=", "qs", "[", "'filter'", "]", "if", "facets_raw", ":", "qs", ".", "setdefault", "(", "'facets'", ",", "{", "}", ")", ".", "update", "(", "facets_raw", ")", "if", "sort", ":", "qs", "[", "'sort'", "]", "=", "sort", "if", "self", ".", "start", ":", "qs", "[", "'from'", "]", "=", "self", ".", "start", "if", "self", ".", "stop", "is", "not", "None", ":", "qs", "[", "'size'", "]", "=", "self", ".", "stop", "-", "self", ".", "start", "if", "highlight_fields", ":", "qs", "[", "'highlight'", "]", "=", "self", ".", "_build_highlight", "(", "highlight_fields", ",", "highlight_options", ")", "if", "explain", ":", "qs", "[", "'explain'", "]", "=", "True", "for", "suggestion", ",", "(", "term", ",", "kwargs", ")", "in", "six", ".", "iteritems", "(", "suggestions", ")", ":", "qs", ".", "setdefault", "(", "'suggest'", ",", "{", "}", ")", "[", "suggestion", "]", "=", "{", "'text'", ":", "term", ",", "'term'", ":", "{", "'field'", ":", "kwargs", ".", "get", "(", "'field'", ",", "'_all'", ")", ",", "}", ",", "}", "self", ".", "fields", ",", "self", ".", "as_list", ",", "self", ".", "as_dict", "=", "fields", ",", "as_list", ",", "as_dict", "self", ".", "search_type", "=", "search_type", "return", "qs" ]
33.583851
0.000359
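The build_search method above folds accumulated (action, value) steps into an Elasticsearch query body. A minimal, runnable sketch of two pieces of its visible logic follows: the 'order_by' key normalization and the facet_filter backfill. The qs and facets values are illustrative assumptions rather than output from the source, and plain str stands in for six's string_types.

def normalize_sort(keys):
    # Mirrors the 'order_by' step handling: '-field' -> {'field': 'desc'}.
    sort = []
    for key in keys:
        if isinstance(key, str) and key.startswith('-'):
            sort.append({key[1:]: 'desc'})
        else:
            sort.append(key)
    return sort

# Illustrative stand-ins for what build_search would have assembled by this point.
qs = {'filter': {'term': {'category': 'books'}}}
facets = {'tags': {'terms': {'field': 'tag'}, 'facet_filter': None}}

# The facet_filter backfill: an explicit None acts as a shell meaning
# "inherit the top-level filter".
for facet in facets.values():
    if facet.get('facet_filter', 1) is None and 'filter' in qs:
        facet['facet_filter'] = qs['filter']

print(normalize_sort(['-published', 'title']))
# -> [{'published': 'desc'}, 'title']
print(facets['tags']['facet_filter'])
# -> {'term': {'category': 'books'}}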
def merge_and_average(self, platform, expression_column, group_by_column,
                          force=False, merge_on_column=None, gsm_on=None,
                          gpl_on=None):
        """Merge and average GSE samples.

        For a given platform, prepare a DataFrame with all the samples
        present in the GSE, annotate it with the given column from the
        platform, and average the values over that column.

        Args:
            platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use.
            expression_column (:obj:`str`): Column name in which "expressions"
                are represented.
            group_by_column (:obj:`str`): The data will be grouped and averaged
                over this column, and only this column will be kept.
            force (:obj:`bool`): If the name of the GPL does not match the
                platform name in the GSM, proceed anyway.
            merge_on_column (:obj:`str`): Column to merge the data on; it
                should be present in both the GSM and the GPL.
            gsm_on (:obj:`str`): If the columns to merge on differ between the
                GSM and the GPL, use this column in the GSM.
            gpl_on (:obj:`str`): If the columns to merge on differ between the
                GSM and the GPL, use this column in the GPL.

        Returns:
            :obj:`pandas.DataFrame`: Merged and averaged table of results.

        """
        if isinstance(platform, str):
            gpl = self.gpls[platform]
        elif isinstance(platform, GPL):
            gpl = platform
        else:
            raise ValueError("Platform has to be of type GPL or string with "
                             "key for platform in GSE")

        data = []
        for gsm in self.gsms.values():
            if gpl.name == gsm.metadata['platform_id'][0]:
                data.append(gsm.annotate_and_average(
                    gpl=gpl,
                    merge_on_column=merge_on_column,
                    expression_column=expression_column,
                    group_by_column=group_by_column,
                    force=force,
                    gpl_on=gpl_on,
                    gsm_on=gsm_on))
        if len(data) == 0:
            logger.warning("No samples for the platform were found\n")
            return None
        elif len(data) == 1:
            return data[0]
        else:
            return data[0].join(data[1:])
[ "def", "merge_and_average", "(", "self", ",", "platform", ",", "expression_column", ",", "group_by_column", ",", "force", "=", "False", ",", "merge_on_column", "=", "None", ",", "gsm_on", "=", "None", ",", "gpl_on", "=", "None", ")", ":", "if", "isinstance", "(", "platform", ",", "str", ")", ":", "gpl", "=", "self", ".", "gpls", "[", "platform", "]", "elif", "isinstance", "(", "platform", ",", "GPL", ")", ":", "gpl", "=", "platform", "else", ":", "raise", "ValueError", "(", "\"Platform has to be of type GPL or string with \"", "\"key for platform in GSE\"", ")", "data", "=", "[", "]", "for", "gsm", "in", "self", ".", "gsms", ".", "values", "(", ")", ":", "if", "gpl", ".", "name", "==", "gsm", ".", "metadata", "[", "'platform_id'", "]", "[", "0", "]", ":", "data", ".", "append", "(", "gsm", ".", "annotate_and_average", "(", "gpl", "=", "gpl", ",", "merge_on_column", "=", "merge_on_column", ",", "expression_column", "=", "expression_column", ",", "group_by_column", "=", "group_by_column", ",", "force", "=", "force", ",", "gpl_on", "=", "gpl_on", ",", "gsm_on", "=", "gsm_on", ")", ")", "if", "len", "(", "data", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"No samples for the platform were found\\n\"", ")", "return", "None", "elif", "len", "(", "data", ")", "==", "1", ":", "return", "data", "[", "0", "]", "else", ":", "return", "data", "[", "0", "]", ".", "join", "(", "data", "[", "1", ":", "]", ")" ]
42.907407
0.00211
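A hypothetical usage sketch for merge_and_average via GEOparse follows. The accession, platform key, and column names below are illustrative assumptions; they must match the actual series and platform tables you download.

import GEOparse

gse = GEOparse.get_GEO(geo="GSE1563", destdir="./")  # example accession
averaged = gse.merge_and_average(
    platform="GPL8300",                # assumed key in gse.gpls
    expression_column="VALUE",         # assumed expression column in each GSM
    group_by_column="ENTREZ_GENE_ID",  # assumed annotation column in the GPL
    gsm_on="ID_REF",                   # merge columns differ: GSM side (assumed)
    gpl_on="ID",                       # merge columns differ: GPL side (assumed)
)
if averaged is not None:               # None means no GSM matched the platform
    print(averaged.head())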
def DeleteAddress(self, script_hash):
        """
        Delete an address from the wallet (including watch-only addresses).

        Args:
            script_hash (UInt160): a bytearray (len 20) holding the script hash that identifies the address.

        Returns:
            tuple:
                bool: True if the address was removed, False otherwise.
                list: a list of any ``neo.Wallet.Coin`` objects to be removed from the wallet.
        """
        coin_keys_toremove = []
        coins_to_remove = []

        for key, coinref in self._coins.items():
            if coinref.Output.ScriptHash.ToBytes() == script_hash.ToBytes():
                coin_keys_toremove.append(key)
                coins_to_remove.append(coinref)

        for k in coin_keys_toremove:
            del self._coins[k]

        ok = False
        if script_hash.ToBytes() in self._contracts.keys():
            ok = True
            del self._contracts[script_hash.ToBytes()]
        elif script_hash in self._watch_only:
            ok = True
            self._watch_only.remove(script_hash)

        return ok, coins_to_remove
[ "def", "DeleteAddress", "(", "self", ",", "script_hash", ")", ":", "coin_keys_toremove", "=", "[", "]", "coins_to_remove", "=", "[", "]", "for", "key", ",", "coinref", "in", "self", ".", "_coins", ".", "items", "(", ")", ":", "if", "coinref", ".", "Output", ".", "ScriptHash", ".", "ToBytes", "(", ")", "==", "script_hash", ".", "ToBytes", "(", ")", ":", "coin_keys_toremove", ".", "append", "(", "key", ")", "coins_to_remove", ".", "append", "(", "coinref", ")", "for", "k", "in", "coin_keys_toremove", ":", "del", "self", ".", "_coins", "[", "k", "]", "ok", "=", "False", "if", "script_hash", ".", "ToBytes", "(", ")", "in", "self", ".", "_contracts", ".", "keys", "(", ")", ":", "ok", "=", "True", "del", "self", ".", "_contracts", "[", "script_hash", ".", "ToBytes", "(", ")", "]", "elif", "script_hash", "in", "self", ".", "_watch_only", ":", "ok", "=", "True", "self", ".", "_watch_only", ".", "remove", "(", "script_hash", ")", "return", "ok", ",", "coins_to_remove" ]
34.322581
0.003656
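A hypothetical sketch of calling DeleteAddress on a neo-python UserWallet follows. The wallet file, password, and address are placeholders, and the import paths assume neo-python's layout, which may differ between versions.

from neo.Implementations.Wallets.peewee.UserWallet import UserWallet
from neo.Wallets.utils import to_aes_key

# Open an existing wallet file; both arguments are placeholders.
wallet = UserWallet.Open("mywallet.db3", to_aes_key("passw0rd"))

# Resolve an address string (example value) to its UInt160 script hash.
script_hash = wallet.ToScriptHash("AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN4oNNzL")

# DeleteAddress returns (removed?, coins to drop), per the docstring above.
removed, stale_coins = wallet.DeleteAddress(script_hash)
print("address removed:", removed)
print("coins dropped from wallet:", len(stale_coins))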