Dataset columns (reconstructed from the viewer header):

    text          string     lengths 78 to 104k
    code_tokens   sequence
    avg_line_len  float64    7.91 to 980
    score         float64    0 to 487
def get_last_weeks(number_of_weeks):
    """Get the last weeks."""
    time_now = datetime.now()
    year = time_now.isocalendar()[0]
    week = time_now.isocalendar()[1]
    weeks = []
    for i in range(0, number_of_weeks):
        start = get_week_dates(year, week - i, as_timestamp=True)[0]
        n_year, n_week = get_year_week(start)
        weeks.append((n_year, n_week))
    return weeks
[ "def", "get_last_weeks", "(", "number_of_weeks", ")", ":", "time_now", "=", "datetime", ".", "now", "(", ")", "year", "=", "time_now", ".", "isocalendar", "(", ")", "[", "0", "]", "week", "=", "time_now", ".", "isocalendar", "(", ")", "[", "1", "]", "weeks", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "number_of_weeks", ")", ":", "start", "=", "get_week_dates", "(", "year", ",", "week", "-", "i", ",", "as_timestamp", "=", "True", ")", "[", "0", "]", "n_year", ",", "n_week", "=", "get_year_week", "(", "start", ")", "weeks", ".", "append", "(", "(", "n_year", ",", "n_week", ")", ")", "return", "weeks" ]
32.166667
12.416667
def remove_from_organization(self, delete_account=False):
    """
    Remove a user from the organization's list of visible users.
    Optionally also delete the account. Deleting the account can only be
    done if the organization owns the account's domain.

    :param delete_account: Whether to delete the account after removing
        from the organization (default false)
    :return: None, because you cannot follow this command with another.
    """
    self.append(removeFromOrg={"deleteAccount": True if delete_account else False})
    return None
[ "def", "remove_from_organization", "(", "self", ",", "delete_account", "=", "False", ")", ":", "self", ".", "append", "(", "removeFromOrg", "=", "{", "\"deleteAccount\"", ":", "True", "if", "delete_account", "else", "False", "}", ")", "return", "None" ]
63.333333
34.444444
def _prepend_row_index(rows, index):
    """Add a left-most index column."""
    if index is None or index is False:
        return rows
    if len(index) != len(rows):
        print('index=', index)
        print('rows=', rows)
        raise ValueError('index must be as long as the number of data rows')
    rows = [[v] + list(row) for v, row in zip(index, rows)]
    return rows
[ "def", "_prepend_row_index", "(", "rows", ",", "index", ")", ":", "if", "index", "is", "None", "or", "index", "is", "False", ":", "return", "rows", "if", "len", "(", "index", ")", "!=", "len", "(", "rows", ")", ":", "print", "(", "'index='", ",", "index", ")", "print", "(", "'rows='", ",", "rows", ")", "raise", "ValueError", "(", "'index must be as long as the number of data rows'", ")", "rows", "=", "[", "[", "v", "]", "+", "list", "(", "row", ")", "for", "v", ",", "row", "in", "zip", "(", "index", ",", "rows", ")", "]", "return", "rows" ]
37.2
13.7
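A minimal sketch of _prepend_row_index in use, assuming the function above is in scope; the index values become the left-most column of each data row:

    rows = [["a", 1], ["b", 2]]
    index = [10, 20]
    print(_prepend_row_index(rows, index))
    # [[10, 'a', 1], [20, 'b', 2]]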
def method_already_there(object_type, method_name, this_class_only=False):
    """
    Returns True if method `method_name` is already implemented by object_type,
    that is, its implementation differs from the one in `object`.

    :param object_type:
    :param method_name:
    :param this_class_only:
    :return:
    """
    if this_class_only:
        return method_name in vars(object_type)  # or object_type.__dict__
    else:
        try:
            method = getattr(object_type, method_name)
        except AttributeError:
            return False
        else:
            return method is not None and method is not getattr(object, method_name, None)
[ "def", "method_already_there", "(", "object_type", ",", "method_name", ",", "this_class_only", "=", "False", ")", ":", "if", "this_class_only", ":", "return", "method_name", "in", "vars", "(", "object_type", ")", "# or object_type.__dict__", "else", ":", "try", ":", "method", "=", "getattr", "(", "object_type", ",", "method_name", ")", "except", "AttributeError", ":", "return", "False", "else", ":", "return", "method", "is", "not", "None", "and", "method", "is", "not", "getattr", "(", "object", ",", "method_name", ",", "None", ")" ]
34
24.842105
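A hedged sketch of method_already_there, assuming the function above is in scope; Point overrides __repr__ but inherits __hash__ from object, so only the former counts as already there:

    class Point(object):
        def __repr__(self):
            return "Point()"

    print(method_already_there(Point, "__repr__"))   # True: differs from object's
    print(method_already_there(Point, "__hash__"))   # False: inherited from object
    print(method_already_there(Point, "__repr__", this_class_only=True))  # True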
def get_default_config_file_path(init_filename=None):
    """gets the path to the default config-file"""
    prm_dir = get_package_prm_dir()
    if not init_filename:
        init_filename = DEFAULT_FILENAME
    src = os.path.join(prm_dir, init_filename)
    return src
[ "def", "get_default_config_file_path", "(", "init_filename", "=", "None", ")", ":", "prm_dir", "=", "get_package_prm_dir", "(", ")", "if", "not", "init_filename", ":", "init_filename", "=", "DEFAULT_FILENAME", "src", "=", "os", ".", "path", ".", "join", "(", "prm_dir", ",", "init_filename", ")", "return", "src" ]
37.571429
9.285714
def get_calls(self, job_name):
    '''
    Reads file by given name and returns CallEdge array
    '''
    config = self.file_index.get_by_name(job_name).yaml
    calls = self.get_calls_from_dict(config, from_name=job_name)
    return calls
[ "def", "get_calls", "(", "self", ",", "job_name", ")", ":", "config", "=", "self", ".", "file_index", ".", "get_by_name", "(", "job_name", ")", ".", "yaml", "calls", "=", "self", ".", "get_calls_from_dict", "(", "config", ",", "from_name", "=", "job_name", ")", "return", "calls" ]
25.8
27.4
def get_unique_links(self):
    """ Get all unique links in the html of the page source.
        Page links include those obtained from:
        "a"->"href", "img"->"src", "link"->"href", and "script"->"src".
    """
    page_url = self.get_current_url()
    soup = self.get_beautiful_soup(self.get_page_source())
    links = page_utils._get_unique_links(page_url, soup)
    return links
[ "def", "get_unique_links", "(", "self", ")", ":", "page_url", "=", "self", ".", "get_current_url", "(", ")", "soup", "=", "self", ".", "get_beautiful_soup", "(", "self", ".", "get_page_source", "(", ")", ")", "links", "=", "page_utils", ".", "_get_unique_links", "(", "page_url", ",", "soup", ")", "return", "links" ]
50.5
10.875
def _post_zone(self, zone):
    """
    Pushes updated zone for current domain to authenticated Hetzner account and
    returns a boolean, if update was successful or not. Furthermore, waits until
    the zone has been taken over, if it is a Hetzner Robot account.
    """
    api = self.api[self.account]['zone']
    data = zone['hidden']
    data[api['file']] = zone['data'].to_text(relativize=True)
    response = self._post(api['POST']['url'], data=data)
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
                     zone['data'].origin.to_unicode(True),
                     zone['data'].to_text(relativize=True).decode('UTF-8'))
        return False
    LOGGER.info('Hetzner => Update zone for domain %s',
                zone['data'].origin.to_unicode(True))
    if self.account == 'robot':
        latency = self._get_provider_option('latency')
        LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
                    latency)
        time.sleep(latency)
    return True
[ "def", "_post_zone", "(", "self", ",", "zone", ")", ":", "api", "=", "self", ".", "api", "[", "self", ".", "account", "]", "[", "'zone'", "]", "data", "=", "zone", "[", "'hidden'", "]", "data", "[", "api", "[", "'file'", "]", "]", "=", "zone", "[", "'data'", "]", ".", "to_text", "(", "relativize", "=", "True", ")", "response", "=", "self", ".", "_post", "(", "api", "[", "'POST'", "]", "[", "'url'", "]", ",", "data", "=", "data", ")", "if", "Provider", ".", "_filter_dom", "(", "response", ".", "text", ",", "api", "[", "'filter'", "]", ")", ":", "LOGGER", ".", "error", "(", "'Hetzner => Unable to update zone for domain %s: Syntax error\\n\\n%s'", ",", "zone", "[", "'data'", "]", ".", "origin", ".", "to_unicode", "(", "True", ")", ",", "zone", "[", "'data'", "]", ".", "to_text", "(", "relativize", "=", "True", ")", ".", "decode", "(", "'UTF-8'", ")", ")", "return", "False", "LOGGER", ".", "info", "(", "'Hetzner => Update zone for domain %s'", ",", "zone", "[", "'data'", "]", ".", "origin", ".", "to_unicode", "(", "True", ")", ")", "if", "self", ".", "account", "==", "'robot'", ":", "latency", "=", "self", ".", "_get_provider_option", "(", "'latency'", ")", "LOGGER", ".", "info", "(", "'Hetzner => Wait %ds until Hetzner Robot has taken over zone...'", ",", "latency", ")", "time", ".", "sleep", "(", "latency", ")", "return", "True" ]
49.416667
22.083333
def get_file_uuid(fpath, hasher=None, stride=1):
    """ Creates a uuid from the hash of a file
    """
    if hasher is None:
        hasher = hashlib.sha1()  # 20 bytes of output
        #hasher = hashlib.sha256() # 32 bytes of output
    # sha1 produces a 20 byte hash
    hashbytes_20 = get_file_hash(fpath, hasher=hasher, stride=stride)
    # sha1 produces 20 bytes, but UUID requires 16 bytes
    hashbytes_16 = hashbytes_20[0:16]
    uuid_ = uuid.UUID(bytes=hashbytes_16)
    return uuid_
[ "def", "get_file_uuid", "(", "fpath", ",", "hasher", "=", "None", ",", "stride", "=", "1", ")", ":", "if", "hasher", "is", "None", ":", "hasher", "=", "hashlib", ".", "sha1", "(", ")", "# 20 bytes of output", "#hasher = hashlib.sha256() # 32 bytes of output", "# sha1 produces a 20 byte hash", "hashbytes_20", "=", "get_file_hash", "(", "fpath", ",", "hasher", "=", "hasher", ",", "stride", "=", "stride", ")", "# sha1 produces 20 bytes, but UUID requires 16 bytes", "hashbytes_16", "=", "hashbytes_20", "[", "0", ":", "16", "]", "uuid_", "=", "uuid", ".", "UUID", "(", "bytes", "=", "hashbytes_16", ")", "return", "uuid_" ]
40.416667
11.166667
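The heart of get_file_uuid is truncating a 20-byte SHA-1 digest to the 16 bytes a UUID needs; a self-contained sketch of that step on in-memory bytes (get_file_hash is assumed to stream the file through the hasher with the given stride):

    import hashlib
    import uuid

    digest_20 = hashlib.sha1(b"example file contents").digest()  # 20 bytes
    file_uuid = uuid.UUID(bytes=digest_20[:16])                  # keep first 16
    print(file_uuid)  # deterministic: same bytes always yield the same UUID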
def buildCommands(self, files, args):
    """ Given a list of (input) files, buildCommands builds all the commands.
        This is one of the two key methods of MapExecutor.
    """
    commands = []
    count = args.count_from
    # For each file, a command is created:
    for fileName in files:
        commands.append(self.buildCommand(fileName, count, args))
        count = count + 1
    return commands
[ "def", "buildCommands", "(", "self", ",", "files", ",", "args", ")", ":", "commands", "=", "[", "]", "count", "=", "args", ".", "count_from", "# For each file, a command is created:", "for", "fileName", "in", "files", ":", "commands", ".", "append", "(", "self", ".", "buildCommand", "(", "fileName", ",", "count", ",", "args", ")", ")", "count", "=", "count", "+", "1", "return", "commands" ]
36.416667
13.416667
def to_json(self):
    """Inherit doc."""
    json = {"name": self.__class__.__name__,
            "num_ranges": len(self._iters)}
    for i in xrange(len(self._iters)):
        json_item = self._iters[i].to_json()
        query_spec = json_item["query_spec"]
        item_name = json_item["name"]
        # Delete and move one level up
        del json_item["query_spec"]
        del json_item["name"]
        json[str(i)] = json_item
    # Store once to save space
    json["query_spec"] = query_spec
    json["item_name"] = item_name
    return json
[ "def", "to_json", "(", "self", ")", ":", "json", "=", "{", "\"name\"", ":", "self", ".", "__class__", ".", "__name__", ",", "\"num_ranges\"", ":", "len", "(", "self", ".", "_iters", ")", "}", "for", "i", "in", "xrange", "(", "len", "(", "self", ".", "_iters", ")", ")", ":", "json_item", "=", "self", ".", "_iters", "[", "i", "]", ".", "to_json", "(", ")", "query_spec", "=", "json_item", "[", "\"query_spec\"", "]", "item_name", "=", "json_item", "[", "\"name\"", "]", "# Delete and move one level up", "del", "json_item", "[", "\"query_spec\"", "]", "del", "json_item", "[", "\"name\"", "]", "json", "[", "str", "(", "i", ")", "]", "=", "json_item", "# Store once to save space", "json", "[", "\"query_spec\"", "]", "=", "query_spec", "json", "[", "\"item_name\"", "]", "=", "item_name", "return", "json" ]
29.055556
11.166667
def preview(self, **query_params):
    """Returns a streaming handle to this job's preview search results.

    Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
    be finished to return any results, the ``preview`` method returns any
    results that have been generated so far, whether the job is running or
    not. The returned search results are the raw data from the server. Pass
    the handle returned to :class:`splunklib.results.ResultsReader` to get a
    nice, Pythonic iterator over objects, as in::

        import splunklib.client as client
        import splunklib.results as results
        service = client.connect(...)
        job = service.jobs.create("search * | head 5")
        rr = results.ResultsReader(job.preview())
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        if rr.is_preview:
            print "Preview of a running search job."
        else:
            print "Job is finished. Results are final."

    This method makes one roundtrip to the server, plus at most two more
    if the ``autologin`` field of :func:`connect` is set to ``True``.

    :param query_params: Additional parameters (optional). For a list of valid
        parameters, see `GET search/jobs/{search_id}/results_preview
        <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_
        in the REST API documentation.
    :type query_params: ``dict``

    :return: The ``InputStream`` IO handle to this job's preview results.
    """
    query_params['segmentation'] = query_params.get('segmentation', 'none')
    return self.get("results_preview", **query_params).body
[ "def", "preview", "(", "self", ",", "*", "*", "query_params", ")", ":", "query_params", "[", "'segmentation'", "]", "=", "query_params", ".", "get", "(", "'segmentation'", ",", "'none'", ")", "return", "self", ".", "get", "(", "\"results_preview\"", ",", "*", "*", "query_params", ")", ".", "body" ]
49.571429
23.809524
def requires_conversion(cls, fileset, file_format):
    """Checks whether the fileset matches the requested file format"""
    if file_format is None:
        return False
    try:
        filset_format = fileset.format
    except AttributeError:
        return False  # Field input
    else:
        return (file_format != filset_format)
[ "def", "requires_conversion", "(", "cls", ",", "fileset", ",", "file_format", ")", ":", "if", "file_format", "is", "None", ":", "return", "False", "try", ":", "filset_format", "=", "fileset", ".", "format", "except", "AttributeError", ":", "return", "False", "# Field input", "else", ":", "return", "(", "file_format", "!=", "filset_format", ")" ]
36.5
11.3
def replace_pool_members(hostname, username, password, name, members):
    '''
    A function to connect to a bigip device and replace members of an existing
    pool with new members.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    name
        The name of the pool to modify
    members
        List of comma delimited pool members to replace existing members with.
        i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80

    CLI Example::

        salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
    '''
    payload = {}
    payload['name'] = name

    #specify members if provided
    if members is not None:
        if isinstance(members, six.string_types):
            members = members.split(',')

        pool_members = []
        for member in members:
            #check to see if already a dictionary ( for states)
            if isinstance(member, dict):
                #check for state alternative name 'member_state', replace with state
                if 'member_state' in member.keys():
                    member['state'] = member.pop('member_state')
                #replace underscore with dash
                for key in member:
                    new_key = key.replace('_', '-')
                    member[new_key] = member.pop(key)
                pool_members.append(member)
            #parse string passed via execution command (for executions)
            else:
                pool_members.append({'name': member, 'address': member.split(':')[0]})

        payload['members'] = pool_members

    #build session
    bigip_session = _build_session(username, password)

    #put to REST
    try:
        response = bigip_session.put(
            BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}'.format(name=name),
            data=salt.utils.json.dumps(payload)
        )
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)

    return _load_response(response)
[ "def", "replace_pool_members", "(", "hostname", ",", "username", ",", "password", ",", "name", ",", "members", ")", ":", "payload", "=", "{", "}", "payload", "[", "'name'", "]", "=", "name", "#specify members if provided", "if", "members", "is", "not", "None", ":", "if", "isinstance", "(", "members", ",", "six", ".", "string_types", ")", ":", "members", "=", "members", ".", "split", "(", "','", ")", "pool_members", "=", "[", "]", "for", "member", "in", "members", ":", "#check to see if already a dictionary ( for states)", "if", "isinstance", "(", "member", ",", "dict", ")", ":", "#check for state alternative name 'member_state', replace with state", "if", "'member_state'", "in", "member", ".", "keys", "(", ")", ":", "member", "[", "'state'", "]", "=", "member", ".", "pop", "(", "'member_state'", ")", "#replace underscore with dash", "for", "key", "in", "member", ":", "new_key", "=", "key", ".", "replace", "(", "'_'", ",", "'-'", ")", "member", "[", "new_key", "]", "=", "member", ".", "pop", "(", "key", ")", "pool_members", ".", "append", "(", "member", ")", "#parse string passed via execution command (for executions)", "else", ":", "pool_members", ".", "append", "(", "{", "'name'", ":", "member", ",", "'address'", ":", "member", ".", "split", "(", "':'", ")", "[", "0", "]", "}", ")", "payload", "[", "'members'", "]", "=", "pool_members", "#build session", "bigip_session", "=", "_build_session", "(", "username", ",", "password", ")", "#put to REST", "try", ":", "response", "=", "bigip_session", ".", "put", "(", "BIG_IP_URL_BASE", ".", "format", "(", "host", "=", "hostname", ")", "+", "'/ltm/pool/{name}'", ".", "format", "(", "name", "=", "name", ")", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "payload", ")", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "return", "_load_connection_error", "(", "hostname", ",", "e", ")", "return", "_load_response", "(", "response", ")" ]
31.4
24.569231
def update_config_pwd(msg, cfg):
    """
    Updates the profile's auth entry with values set by the user.
    This will overwrite existing values.

    Args:
        :msg: (Message class) an instance of a message class.
        :cfg: (jsonconfig.Config) config instance.
    """
    msg_type = msg.__class__.__name__.lower()
    key_fmt = msg.profile + "_" + msg_type
    if isinstance(msg._auth, (MutableSequence, tuple)):
        cfg.pwd[key_fmt] = " :: ".join(msg._auth)
    else:
        cfg.pwd[key_fmt] = msg._auth
[ "def", "update_config_pwd", "(", "msg", ",", "cfg", ")", ":", "msg_type", "=", "msg", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "key_fmt", "=", "msg", ".", "profile", "+", "\"_\"", "+", "msg_type", "if", "isinstance", "(", "msg", ".", "_auth", ",", "(", "MutableSequence", ",", "tuple", ")", ")", ":", "cfg", ".", "pwd", "[", "key_fmt", "]", "=", "\" :: \"", ".", "join", "(", "msg", ".", "_auth", ")", "else", ":", "cfg", ".", "pwd", "[", "key_fmt", "]", "=", "msg", ".", "_auth" ]
33.8
13.4
def users_set_preferences(self, user_id, data, **kwargs):
    """Set user’s preferences."""
    return self.__call_api_post('users.setPreferences', userId=user_id, data=data, kwargs=kwargs)
[ "def", "users_set_preferences", "(", "self", ",", "user_id", ",", "data", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'users.setPreferences'", ",", "userId", "=", "user_id", ",", "data", "=", "data", ",", "kwargs", "=", "kwargs", ")" ]
65
26
def mmPrettyPrintSequenceCellRepresentations(self, sortby="Column"): """ Pretty print the cell representations for sequences in the history. @param sortby (string) Column of table to sort by @return (string) Pretty-printed text """ self._mmComputeTransitionTraces() table = PrettyTable(["Pattern", "Column", "predicted=>active cells"]) for sequenceLabel, predictedActiveCells in ( self._mmData["predictedActiveCellsForSequence"].iteritems()): cellsForColumn = self.mapCellsToColumns(predictedActiveCells) for column, cells in cellsForColumn.iteritems(): table.add_row([sequenceLabel, column, list(cells)]) return table.get_string(sortby=sortby).encode("utf-8")
[ "def", "mmPrettyPrintSequenceCellRepresentations", "(", "self", ",", "sortby", "=", "\"Column\"", ")", ":", "self", ".", "_mmComputeTransitionTraces", "(", ")", "table", "=", "PrettyTable", "(", "[", "\"Pattern\"", ",", "\"Column\"", ",", "\"predicted=>active cells\"", "]", ")", "for", "sequenceLabel", ",", "predictedActiveCells", "in", "(", "self", ".", "_mmData", "[", "\"predictedActiveCellsForSequence\"", "]", ".", "iteritems", "(", ")", ")", ":", "cellsForColumn", "=", "self", ".", "mapCellsToColumns", "(", "predictedActiveCells", ")", "for", "column", ",", "cells", "in", "cellsForColumn", ".", "iteritems", "(", ")", ":", "table", ".", "add_row", "(", "[", "sequenceLabel", ",", "column", ",", "list", "(", "cells", ")", "]", ")", "return", "table", ".", "get_string", "(", "sortby", "=", "sortby", ")", ".", "encode", "(", "\"utf-8\"", ")" ]
39.611111
21.388889
def avg(self, func=lambda x: x):
    """
    Returns the average value of data elements

    :param func: lambda expression to transform data
    :return: average value as float object
    """
    count = self.count()
    if count == 0:
        raise NoElementsError(u"Iterable contains no elements")
    return float(self.sum(func)) / float(count)
[ "def", "avg", "(", "self", ",", "func", "=", "lambda", "x", ":", "x", ")", ":", "count", "=", "self", ".", "count", "(", ")", "if", "count", "==", "0", ":", "raise", "NoElementsError", "(", "u\"Iterable contains no elements\"", ")", "return", "float", "(", "self", ".", "sum", "(", "func", ")", ")", "/", "float", "(", "count", ")" ]
37.4
10.8
def isHereDoc(self, block, column):
    """Check if character at column is a here document
    """
    dataObject = block.userData()
    data = dataObject.data if dataObject is not None else None
    return self._syntax.isHereDoc(data, column)
[ "def", "isHereDoc", "(", "self", ",", "block", ",", "column", ")", ":", "dataObject", "=", "block", ".", "userData", "(", ")", "data", "=", "dataObject", ".", "data", "if", "dataObject", "is", "not", "None", "else", "None", "return", "self", ".", "_syntax", ".", "isHereDoc", "(", "data", ",", "column", ")" ]
43
7.5
def _create_stdout_logger(logging_level):
    """ create a logger to stdout. This creates loggers for a series
    of modules we would like to log information on.
    """
    out_hdlr = logging.StreamHandler(sys.stdout)
    out_hdlr.setFormatter(logging.Formatter(
        '[%(asctime)s] %(message)s', "%H:%M:%S"
    ))
    out_hdlr.setLevel(logging_level)
    for name in LOGGING_NAMES:
        log = logging.getLogger(name)
        log.addHandler(out_hdlr)
        log.setLevel(logging_level)
[ "def", "_create_stdout_logger", "(", "logging_level", ")", ":", "out_hdlr", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "out_hdlr", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'[%(asctime)s] %(message)s'", ",", "\"%H:%M:%S\"", ")", ")", "out_hdlr", ".", "setLevel", "(", "logging_level", ")", "for", "name", "in", "LOGGING_NAMES", ":", "log", "=", "logging", ".", "getLogger", "(", "name", ")", "log", ".", "addHandler", "(", "out_hdlr", ")", "log", ".", "setLevel", "(", "logging_level", ")" ]
34.5
8.357143
def deleteCategory(self, name):
    """
    Deletes the category with the given name.

    If the category does not exist, a :py:exc:`KeyError` will be thrown.
    """
    if name not in self.categories:
        raise KeyError("No Category with name '%s'" % name)
    del self.categories[name]
    self.redraw()
[ "def", "deleteCategory", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "categories", ":", "raise", "KeyError", "(", "\"No Category with name '%s'\"", "%", "name", ")", "del", "self", ".", "categories", "[", "name", "]", "self", ".", "redraw", "(", ")" ]
33.9
13.5
def open(self, fn):
    "Open image in `fn`, subclass and overwrite for custom behavior."
    return open_image(fn, convert_mode=self.convert_mode, after_open=self.after_open)
[ "def", "open", "(", "self", ",", "fn", ")", ":", "return", "open_image", "(", "fn", ",", "convert_mode", "=", "self", ".", "convert_mode", ",", "after_open", "=", "self", ".", "after_open", ")" ]
60.333333
34.333333
def setup_shot_signals(self, ):
    """Setup the signals for the shot page

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug("Setting up shot page signals.")
    self.shot_prj_view_pb.clicked.connect(self.shot_view_prj)
    self.shot_seq_view_pb.clicked.connect(self.shot_view_seq)
    self.shot_asset_view_pb.clicked.connect(self.shot_view_asset)
    self.shot_asset_create_pb.clicked.connect(self.shot_create_asset)
    self.shot_asset_add_pb.clicked.connect(self.shot_add_asset)
    self.shot_asset_remove_pb.clicked.connect(self.shot_remove_asset)
    self.shot_task_view_pb.clicked.connect(self.shot_view_task)
    self.shot_task_create_pb.clicked.connect(self.shot_create_task)
    self.shot_start_sb.valueChanged.connect(self.shot_save)
    self.shot_end_sb.valueChanged.connect(self.shot_save)
    self.shot_handle_sb.valueChanged.connect(self.shot_save)
    self.shot_desc_pte.textChanged.connect(self.shot_save)
[ "def", "setup_shot_signals", "(", "self", ",", ")", ":", "log", ".", "debug", "(", "\"Setting up shot page signals.\"", ")", "self", ".", "shot_prj_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_view_prj", ")", "self", ".", "shot_seq_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_view_seq", ")", "self", ".", "shot_asset_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_view_asset", ")", "self", ".", "shot_asset_create_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_create_asset", ")", "self", ".", "shot_asset_add_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_add_asset", ")", "self", ".", "shot_asset_remove_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_remove_asset", ")", "self", ".", "shot_task_view_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_view_task", ")", "self", ".", "shot_task_create_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_create_task", ")", "self", ".", "shot_start_sb", ".", "valueChanged", ".", "connect", "(", "self", ".", "shot_save", ")", "self", ".", "shot_end_sb", ".", "valueChanged", ".", "connect", "(", "self", ".", "shot_save", ")", "self", ".", "shot_handle_sb", ".", "valueChanged", ".", "connect", "(", "self", ".", "shot_save", ")", "self", ".", "shot_desc_pte", ".", "textChanged", ".", "connect", "(", "self", ".", "shot_save", ")" ]
50.05
21.8
def namedb_get_names_by_sender( cur, sender, current_block ):
    """
    Given a sender pubkey script, find all the non-expired non-revoked names owned by it.
    Return None if the sender owns no names.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )

    query = "SELECT name_records.name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
            "WHERE name_records.sender = ? AND name_records.revoked = 0 AND " + unexpired_query + ";"

    args = (sender,) + unexpired_args

    name_rows = namedb_query_execute( cur, query, args )
    names = []
    for name_row in name_rows:
        names.append( name_row['name'] )

    return names
[ "def", "namedb_get_names_by_sender", "(", "cur", ",", "sender", ",", "current_block", ")", ":", "unexpired_query", ",", "unexpired_args", "=", "namedb_select_where_unexpired_names", "(", "current_block", ")", "query", "=", "\"SELECT name_records.name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id \"", "+", "\"WHERE name_records.sender = ? AND name_records.revoked = 0 AND \"", "+", "unexpired_query", "+", "\";\"", "args", "=", "(", "sender", ",", ")", "+", "unexpired_args", "name_rows", "=", "namedb_query_execute", "(", "cur", ",", "query", ",", "args", ")", "names", "=", "[", "]", "for", "name_row", "in", "name_rows", ":", "names", ".", "append", "(", "name_row", "[", "'name'", "]", ")", "return", "names" ]
36.2
29.8
def _CollectArguments(function, args, kwargs):
    """Merges positional and keyword arguments into a single dict."""
    all_args = dict(kwargs)
    arg_names = inspect.getargspec(function)[0]
    for position, arg in enumerate(args):
        if position < len(arg_names):
            all_args[arg_names[position]] = arg
    return all_args
[ "def", "_CollectArguments", "(", "function", ",", "args", ",", "kwargs", ")", ":", "all_args", "=", "dict", "(", "kwargs", ")", "arg_names", "=", "inspect", ".", "getargspec", "(", "function", ")", "[", "0", "]", "for", "position", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "if", "position", "<", "len", "(", "arg_names", ")", ":", "all_args", "[", "arg_names", "[", "position", "]", "]", "=", "arg", "return", "all_args" ]
39.125
7.25
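A quick sketch of _CollectArguments, assuming the function above is in scope; note it relies on inspect.getargspec, which was removed in Python 3.11, so this runs only on older interpreters:

    def greet(greeting, name, punctuation="!"):
        return greeting + ", " + name + punctuation

    merged = _CollectArguments(greet, ("Hello", "Ada"), {"punctuation": "?"})
    print(merged)  # {'punctuation': '?', 'greeting': 'Hello', 'name': 'Ada'}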
def records( self ):
    """
    Returns the record list that is linked with this combo box.

    :return     [<orb.Table>, ..]
    """
    records = []
    for i in range(self.count()):
        record = self.recordAt(i)
        if record:
            records.append(record)
    return records
[ "def", "records", "(", "self", ")", ":", "records", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", ")", ":", "record", "=", "self", ".", "recordAt", "(", "i", ")", "if", "record", ":", "records", ".", "append", "(", "record", ")", "return", "records" ]
28.5
11.666667
def _validate_number_of_layers(self, number_of_layers):
    """
    Makes sure that the specified number of layers to squash
    is a valid number
    """

    # Only positive numbers are correct
    if number_of_layers <= 0:
        raise SquashError(
            "Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)

    # Do not squash if provided number of layer to squash is bigger
    # than number of actual layers in the image
    if number_of_layers > len(self.old_image_layers):
        raise SquashError(
            "Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
[ "def", "_validate_number_of_layers", "(", "self", ",", "number_of_layers", ")", ":", "# Only positive numbers are correct", "if", "number_of_layers", "<=", "0", ":", "raise", "SquashError", "(", "\"Number of layers to squash cannot be less or equal 0, provided: %s\"", "%", "number_of_layers", ")", "# Do not squash if provided number of layer to squash is bigger", "# than number of actual layers in the image", "if", "number_of_layers", ">", "len", "(", "self", ".", "old_image_layers", ")", ":", "raise", "SquashError", "(", "\"Cannot squash %s layers, the %s image contains only %s layers\"", "%", "(", "number_of_layers", ",", "self", ".", "image", ",", "len", "(", "self", ".", "old_image_layers", ")", ")", ")" ]
45.375
24.25
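A sketch exercising _validate_number_of_layers through a minimal stand-in object; SquashError and the attribute names are taken from the method above, while the class here is hypothetical and assumes the method is defined in the same script so it picks up this SquashError:

    class SquashError(Exception):
        pass

    class FakeSquash:
        image = "example/image:latest"
        old_image_layers = ["layer1", "layer2", "layer3"]

    # bind the function above as a method of the stand-in class
    FakeSquash._validate_number_of_layers = _validate_number_of_layers
    FakeSquash()._validate_number_of_layers(2)    # OK: 0 < 2 <= 3
    # FakeSquash()._validate_number_of_layers(5)  # would raise SquashError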
def copy_assets(self, output_path):
    """Copies all asset files from the source path to the destination path.
    If no such source path exists, no asset copying will be performed.
    """
    src_paths = []
    # if we have a theme
    if self.config.theme is not None:
        # assume it's in the folder: "themes/theme_name/assets"
        src_paths.append(os.path.join(
            self.path,
            StatikProject.THEMES_DIR,
            self.config.theme,
            StatikProject.ASSETS_DIR
        ))
        # NOTE: Adding the theme's assets directory *before* the project's internal assets
        # directory always ensures that the project's own assets are copied *after* the
        # theme's, thereby ensuring that the project's assets folder takes precedence
        # over the theme's.

    # always attempt to copy from our base assets folder
    if os.path.isabs(self.config.assets_src_path):
        src_paths.append(self.config.assets_src_path)
    else:
        src_paths.append(os.path.join(self.path, self.config.assets_src_path))

    for src_path in src_paths:
        if os.path.exists(src_path) and os.path.isdir(src_path):
            dest_path = self.config.assets_dest_path
            if not os.path.isabs(dest_path):
                dest_path = os.path.join(output_path, dest_path)

            asset_count = copy_tree(src_path, dest_path)
            logger.info("Copied %s asset(s) from %s to %s", asset_count, src_path, dest_path)
        else:
            logger.info(
                "Missing assets source path - skipping copying of assets: %s",
                src_path
            )
[ "def", "copy_assets", "(", "self", ",", "output_path", ")", ":", "src_paths", "=", "[", "]", "# if we have a theme", "if", "self", ".", "config", ".", "theme", "is", "not", "None", ":", "# assume it's in the folder: \"themes/theme_name/assets\"", "src_paths", ".", "append", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "StatikProject", ".", "THEMES_DIR", ",", "self", ".", "config", ".", "theme", ",", "StatikProject", ".", "ASSETS_DIR", ")", ")", "# NOTE: Adding the theme's assets directory *before* the project's internal assets", "# directory always ensures that the project's own assets are copied *after* the", "# theme's, thereby ensuring that the project's assets folder takes precedence", "# over the theme's.", "# always attempt to copy from our base assets folder", "if", "os", ".", "path", ".", "isabs", "(", "self", ".", "config", ".", "assets_src_path", ")", ":", "src_paths", ".", "append", "(", "self", ".", "config", ".", "assets_src_path", ")", "else", ":", "src_paths", ".", "append", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "self", ".", "config", ".", "assets_src_path", ")", ")", "for", "src_path", "in", "src_paths", ":", "if", "os", ".", "path", ".", "exists", "(", "src_path", ")", "and", "os", ".", "path", ".", "isdir", "(", "src_path", ")", ":", "dest_path", "=", "self", ".", "config", ".", "assets_dest_path", "if", "not", "os", ".", "path", ".", "isabs", "(", "dest_path", ")", ":", "dest_path", "=", "os", ".", "path", ".", "join", "(", "output_path", ",", "dest_path", ")", "asset_count", "=", "copy_tree", "(", "src_path", ",", "dest_path", ")", "logger", ".", "info", "(", "\"Copied %s asset(s) from %s to %s\"", ",", "asset_count", ",", "src_path", ",", "dest_path", ")", "else", ":", "logger", ".", "info", "(", "\"Missing assets source path - skipping copying of assets: %s\"", ",", "src_path", ")" ]
44.769231
22.461538
def mon_hosts(mons):
    """
    Iterate through list of MON hosts, return tuples of (name, host).
    """
    for m in mons:
        if m.count(':'):
            (name, host) = m.split(':')
        else:
            name = m
            host = m
        if name.count('.') > 0:
            name = name.split('.')[0]
        yield (name, host)
[ "def", "mon_hosts", "(", "mons", ")", ":", "for", "m", "in", "mons", ":", "if", "m", ".", "count", "(", "':'", ")", ":", "(", "name", ",", "host", ")", "=", "m", ".", "split", "(", "':'", ")", "else", ":", "name", "=", "m", "host", "=", "m", "if", "name", ".", "count", "(", "'.'", ")", ">", "0", ":", "name", "=", "name", ".", "split", "(", "'.'", ")", "[", "0", "]", "yield", "(", "name", ",", "host", ")" ]
26.076923
13.461538
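A small sketch of mon_hosts, assuming the generator above is in scope; "name:host" entries are split, and bare hostnames keep their full value as the host while the name is trimmed at the first dot:

    print(list(mon_hosts(["mon0:10.0.0.1", "mon1.example.com"])))
    # [('mon0', '10.0.0.1'), ('mon1', 'mon1.example.com')]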
def delete_custom_view(auth, url, name):
    """
    Function takes input of auth, url, and name and issues a RESTFUL call to
    delete a specific custom view from HPE IMC.

    :param name: string containing the name of the desired custom view
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: str of deletion results ("View " + name + " deleted successfully")
    :rtype: str

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.groups import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> delete_custom_view(auth.creds, auth.url, name="L1 View")
    'View L1 View deleted successfully'
    >>> view_1 = get_custom_views(auth.creds, auth.url, name='L1 View')
    >>> assert view_1 is None
    >>> delete_custom_view(auth.creds, auth.url, name="L2 View")
    'View L2 View deleted successfully'
    >>> view_2 = get_custom_views(auth.creds, auth.url, name='L2 View')
    >>> assert view_2 is None
    """
    view_id = get_custom_views(auth, url, name)
    if view_id is None:
        print("View " + name + " doesn't exists")
        return view_id
    view_id = get_custom_views(auth, url, name)[0]['symbolId']
    delete_custom_view_url = '/imcrs/plat/res/view/custom/' + str(view_id)
    f_url = url + delete_custom_view_url
    response = requests.delete(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 204:
            print('View ' + name + ' deleted successfully')
            return response.status_code
        else:
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' delete_custom_view: An Error has occured'
[ "def", "delete_custom_view", "(", "auth", ",", "url", ",", "name", ")", ":", "view_id", "=", "get_custom_views", "(", "auth", ",", "url", ",", "name", ")", "if", "view_id", "is", "None", ":", "print", "(", "\"View \"", "+", "name", "+", "\" doesn't exists\"", ")", "return", "view_id", "view_id", "=", "get_custom_views", "(", "auth", ",", "url", ",", "name", ")", "[", "0", "]", "[", "'symbolId'", "]", "delete_custom_view_url", "=", "'/imcrs/plat/res/view/custom/'", "+", "str", "(", "view_id", ")", "f_url", "=", "url", "+", "delete_custom_view_url", "response", "=", "requests", ".", "delete", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "if", "response", ".", "status_code", "==", "204", ":", "print", "(", "'View '", "+", "name", "+", "' deleted successfully'", ")", "return", "response", ".", "status_code", "else", ":", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "' delete_custom_view: An Error has occured'" ]
34.788462
25.096154
def reconfigure_services(self, *service_names):
    """
    Update all files for one or more registered services, and,
    if ready, optionally restart them.

    If no service names are given, reconfigures all registered services.
    """
    for service_name in service_names or self.services.keys():
        if self.is_ready(service_name):
            self.fire_event('data_ready', service_name)
            self.fire_event('start', service_name, default=[
                service_restart,
                manage_ports])
            self.save_ready(service_name)
        else:
            if self.was_ready(service_name):
                self.fire_event('data_lost', service_name)
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])
            self.save_lost(service_name)
[ "def", "reconfigure_services", "(", "self", ",", "*", "service_names", ")", ":", "for", "service_name", "in", "service_names", "or", "self", ".", "services", ".", "keys", "(", ")", ":", "if", "self", ".", "is_ready", "(", "service_name", ")", ":", "self", ".", "fire_event", "(", "'data_ready'", ",", "service_name", ")", "self", ".", "fire_event", "(", "'start'", ",", "service_name", ",", "default", "=", "[", "service_restart", ",", "manage_ports", "]", ")", "self", ".", "save_ready", "(", "service_name", ")", "else", ":", "if", "self", ".", "was_ready", "(", "service_name", ")", ":", "self", ".", "fire_event", "(", "'data_lost'", ",", "service_name", ")", "self", ".", "fire_event", "(", "'stop'", ",", "service_name", ",", "default", "=", "[", "manage_ports", ",", "service_stop", "]", ")", "self", ".", "save_lost", "(", "service_name", ")" ]
42.904762
13.857143
def visit_Module(self, node):
    """
    Importing a module defines a new variable name.
    """
    duc = SilentDefUseChains()
    duc.visit(node)
    for d in duc.locals[node]:
        self.result[d.name()] = d.node
[ "def", "visit_Module", "(", "self", ",", "node", ")", ":", "duc", "=", "SilentDefUseChains", "(", ")", "duc", ".", "visit", "(", "node", ")", "for", "d", "in", "duc", ".", "locals", "[", "node", "]", ":", "self", ".", "result", "[", "d", ".", "name", "(", ")", "]", "=", "d", ".", "node" ]
36.5
7
def seiffert_mean(nums):
    r"""Return Seiffert's mean.

    Seiffert's mean of two numbers x and y is:
    :math:`\frac{x - y}{4 \cdot \arctan \sqrt{\frac{x}{y}} - \pi}`

    It is defined in :cite:`Seiffert:1993`.

    Parameters
    ----------
    nums : list
        A series of numbers

    Returns
    -------
    float
        Seiffert's mean of nums

    Raises
    ------
    AttributeError
        seiffert_mean supports no more than two values

    Examples
    --------
    >>> seiffert_mean([1, 2])
    1.4712939827611637
    >>> seiffert_mean([1, 0])
    0.3183098861837907
    >>> seiffert_mean([2, 4])
    2.9425879655223275
    >>> seiffert_mean([2, 1000])
    336.84053300118825

    """
    if len(nums) == 1:
        return nums[0]
    if len(nums) > 2:
        raise AttributeError('seiffert_mean supports no more than two values')
    if nums[0] + nums[1] == 0 or nums[0] - nums[1] == 0:
        return float('NaN')
    return (nums[0] - nums[1]) / (
        2 * math.asin((nums[0] - nums[1]) / (nums[0] + nums[1]))
    )
[ "def", "seiffert_mean", "(", "nums", ")", ":", "if", "len", "(", "nums", ")", "==", "1", ":", "return", "nums", "[", "0", "]", "if", "len", "(", "nums", ")", ">", "2", ":", "raise", "AttributeError", "(", "'seiffert_mean supports no more than two values'", ")", "if", "nums", "[", "0", "]", "+", "nums", "[", "1", "]", "==", "0", "or", "nums", "[", "0", "]", "-", "nums", "[", "1", "]", "==", "0", ":", "return", "float", "(", "'NaN'", ")", "return", "(", "nums", "[", "0", "]", "-", "nums", "[", "1", "]", ")", "/", "(", "2", "*", "math", ".", "asin", "(", "(", "nums", "[", "0", "]", "-", "nums", "[", "1", "]", ")", "/", "(", "nums", "[", "0", "]", "+", "nums", "[", "1", "]", ")", ")", ")" ]
22.727273
22.045455
def install_as_egg(self, destination_eggdir):
    '''Install wheel as an egg directory.'''
    with zipfile.ZipFile(self.filename) as zf:
        self._install_as_egg(destination_eggdir, zf)
[ "def", "install_as_egg", "(", "self", ",", "destination_eggdir", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "self", ".", "filename", ")", "as", "zf", ":", "self", ".", "_install_as_egg", "(", "destination_eggdir", ",", "zf", ")" ]
49.75
9.75
def mkchange(text0, text1, version, mtime):
    "return a Change diffing the two strings"
    return Change(version, mtime, ucrc(text1), diff.word_diff(text0, text1))
[ "def", "mkchange", "(", "text0", ",", "text1", ",", "version", ",", "mtime", ")", ":", "return", "Change", "(", "version", ",", "mtime", ",", "ucrc", "(", "text1", ")", ",", "diff", ".", "word_diff", "(", "text0", ",", "text1", ")", ")" ]
51.666667
11.666667
def _set_static_ag_ip_config(self, v, load=False):
    """
    Setter method for static_ag_ip_config, mapped from YANG variable /rbridge_id/ip/static_ag_ip_config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_static_ag_ip_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_static_ag_ip_config() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=static_ag_ip_config.static_ag_ip_config, is_container='container', presence=False, yang_name="static-ag-ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IpAnycastGatewayMacCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """static_ag_ip_config must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=static_ag_ip_config.static_ag_ip_config, is_container='container', presence=False, yang_name="static-ag-ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IpAnycastGatewayMacCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
        })

    self.__static_ag_ip_config = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_static_ag_ip_config", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "static_ag_ip_config", ".", "static_ag_ip_config", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"static-ag-ip-config\"", ",", "rest_name", "=", "\"\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'IpAnycastGatewayMacCallpoint'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vrrp'", ",", "defining_module", "=", "'brocade-vrrp'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"static_ag_ip_config must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=static_ag_ip_config.static_ag_ip_config, is_container='container', presence=False, yang_name=\"static-ag-ip-config\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IpAnycastGatewayMacCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__static_ag_ip_config", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
80.045455
37.318182
def parse_expression(clause):
    """ For a clause that could be a field, value, or expression """
    if isinstance(clause, Expression):
        return clause
    elif hasattr(clause, "getName") and clause.getName() != "field":
        if clause.getName() == "nested":
            return AttributeSelection.from_statement(clause)
        elif clause.getName() == "function":
            return SelectFunction.from_statement(clause)
        else:
            return Value(resolve(clause[0]))
    else:
        return Field(clause[0])
[ "def", "parse_expression", "(", "clause", ")", ":", "if", "isinstance", "(", "clause", ",", "Expression", ")", ":", "return", "clause", "elif", "hasattr", "(", "clause", ",", "\"getName\"", ")", "and", "clause", ".", "getName", "(", ")", "!=", "\"field\"", ":", "if", "clause", ".", "getName", "(", ")", "==", "\"nested\"", ":", "return", "AttributeSelection", ".", "from_statement", "(", "clause", ")", "elif", "clause", ".", "getName", "(", ")", "==", "\"function\"", ":", "return", "SelectFunction", ".", "from_statement", "(", "clause", ")", "else", ":", "return", "Value", "(", "resolve", "(", "clause", "[", "0", "]", ")", ")", "else", ":", "return", "Field", "(", "clause", "[", "0", "]", ")" ]
40.076923
13.153846
def extract_from_system(cert_callback=None, callback_only_on_failure=False):
    """
    Extracts trusted CA certificates from the Windows certificate store

    :param cert_callback:
        A callback that is called once for each certificate in the trust store.
        It should accept two parameters: an asn1crypto.x509.Certificate object,
        and a reason. The reason will be None if the certificate is being
        exported, otherwise it will be a unicode string of the reason it won't.

    :param callback_only_on_failure:
        A boolean - if the callback should only be called when a certificate is
        not exported.

    :raises:
        OSError - when an error is returned by the OS crypto library

    :return:
        A list of 3-element tuples:
         - 0: a byte string of a DER-encoded certificate
         - 1: a set of unicode strings that are OIDs of purposes to trust the
              certificate for
         - 2: a set of unicode strings that are OIDs of purposes to reject the
              certificate for
    """
    certificates = {}
    processed = {}

    now = datetime.datetime.utcnow()

    for store in ["ROOT", "CA"]:
        store_handle = crypt32.CertOpenSystemStoreW(null(), store)
        handle_error(store_handle)

        context_pointer = null()
        while True:
            context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer)
            if is_null(context_pointer):
                break
            context = unwrap(context_pointer)

            trust_all = False
            data = None
            digest = None

            if context.dwCertEncodingType != Crypt32Const.X509_ASN_ENCODING:
                continue

            data = bytes_from_buffer(context.pbCertEncoded, int(context.cbCertEncoded))
            digest = hashlib.sha1(data).digest()
            if digest in processed:
                continue

            processed[digest] = True
            cert_info = unwrap(context.pCertInfo)

            not_before_seconds = _convert_filetime_to_timestamp(cert_info.NotBefore)
            try:
                not_before = datetime.datetime.fromtimestamp(not_before_seconds)
                if not_before > now:
                    if cert_callback:
                        cert_callback(Certificate.load(data), 'not yet valid')
                    continue
            except (ValueError, OSError):
                # If there is an error converting the not before timestamp,
                # it is almost certainly because it is from too long ago,
                # which means the cert is definitely valid by now.
                pass

            not_after_seconds = _convert_filetime_to_timestamp(cert_info.NotAfter)
            try:
                not_after = datetime.datetime.fromtimestamp(not_after_seconds)
                if not_after < now:
                    if cert_callback:
                        cert_callback(Certificate.load(data), 'no longer valid')
                    continue
            except (ValueError, OSError) as e:
                # The only reason we would get an exception here is if the
                # expiration time is so far in the future that it can't be
                # used as a timestamp, or it is before 0. If it is very far
                # in the future, the cert is still valid, so we only raise
                # an exception if the timestamp is less than zero.
                if not_after_seconds < 0:
                    message = e.args[0] + ' - ' + str_cls(not_after_seconds)
                    e.args = (message,) + e.args[1:]
                    raise e

            trust_oids = set()
            reject_oids = set()

            # Here we grab the extended key usage properties that Windows
            # layers on top of the extended key usage extension that is
            # part of the certificate itself. For highest security, users
            # should only use certificates for the intersection of the two
            # lists of purposes. However, many seen to treat the OS trust
            # list as an override.
            to_read = new(crypt32, 'DWORD *', 0)
            res = crypt32.CertGetEnhancedKeyUsage(
                context_pointer,
                Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,
                null(),
                to_read
            )

            # Per the Microsoft documentation, if CRYPT_E_NOT_FOUND is returned
            # from get_error(), it means the certificate is valid for all purposes
            error_code, _ = get_error()
            if not res and error_code != Crypt32Const.CRYPT_E_NOT_FOUND:
                handle_error(res)

            if error_code == Crypt32Const.CRYPT_E_NOT_FOUND:
                trust_all = True
            else:
                usage_buffer = buffer_from_bytes(deref(to_read))
                res = crypt32.CertGetEnhancedKeyUsage(
                    context_pointer,
                    Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,
                    cast(crypt32, 'CERT_ENHKEY_USAGE *', usage_buffer),
                    to_read
                )
                handle_error(res)

                key_usage_pointer = struct_from_buffer(crypt32, 'CERT_ENHKEY_USAGE', usage_buffer)
                key_usage = unwrap(key_usage_pointer)

                # Having no enhanced usage properties means a cert is distrusted
                if key_usage.cUsageIdentifier == 0:
                    if cert_callback:
                        cert_callback(Certificate.load(data), 'explicitly distrusted')
                    continue

                oids = array_from_pointer(
                    crypt32,
                    'LPCSTR',
                    key_usage.rgpszUsageIdentifier,
                    key_usage.cUsageIdentifier
                )
                for oid in oids:
                    trust_oids.add(oid.decode('ascii'))

            cert = None

            # If the certificate is not under blanket trust, we have to
            # determine what purposes it is rejected for by diffing the
            # set of OIDs from the certificate with the OIDs that are
            # trusted.
            if not trust_all:
                cert = Certificate.load(data)
                if cert.extended_key_usage_value:
                    for cert_oid in cert.extended_key_usage_value:
                        oid = cert_oid.dotted
                        if oid not in trust_oids:
                            reject_oids.add(oid)

            if cert_callback and not callback_only_on_failure:
                if cert is None:
                    cert = Certificate.load(data)
                cert_callback(cert, None)

            certificates[digest] = (data, trust_oids, reject_oids)

        result = crypt32.CertCloseStore(store_handle, 0)
        handle_error(result)
        store_handle = None

    return certificates.values()
[ "def", "extract_from_system", "(", "cert_callback", "=", "None", ",", "callback_only_on_failure", "=", "False", ")", ":", "certificates", "=", "{", "}", "processed", "=", "{", "}", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "for", "store", "in", "[", "\"ROOT\"", ",", "\"CA\"", "]", ":", "store_handle", "=", "crypt32", ".", "CertOpenSystemStoreW", "(", "null", "(", ")", ",", "store", ")", "handle_error", "(", "store_handle", ")", "context_pointer", "=", "null", "(", ")", "while", "True", ":", "context_pointer", "=", "crypt32", ".", "CertEnumCertificatesInStore", "(", "store_handle", ",", "context_pointer", ")", "if", "is_null", "(", "context_pointer", ")", ":", "break", "context", "=", "unwrap", "(", "context_pointer", ")", "trust_all", "=", "False", "data", "=", "None", "digest", "=", "None", "if", "context", ".", "dwCertEncodingType", "!=", "Crypt32Const", ".", "X509_ASN_ENCODING", ":", "continue", "data", "=", "bytes_from_buffer", "(", "context", ".", "pbCertEncoded", ",", "int", "(", "context", ".", "cbCertEncoded", ")", ")", "digest", "=", "hashlib", ".", "sha1", "(", "data", ")", ".", "digest", "(", ")", "if", "digest", "in", "processed", ":", "continue", "processed", "[", "digest", "]", "=", "True", "cert_info", "=", "unwrap", "(", "context", ".", "pCertInfo", ")", "not_before_seconds", "=", "_convert_filetime_to_timestamp", "(", "cert_info", ".", "NotBefore", ")", "try", ":", "not_before", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "not_before_seconds", ")", "if", "not_before", ">", "now", ":", "if", "cert_callback", ":", "cert_callback", "(", "Certificate", ".", "load", "(", "data", ")", ",", "'not yet valid'", ")", "continue", "except", "(", "ValueError", ",", "OSError", ")", ":", "# If there is an error converting the not before timestamp,", "# it is almost certainly because it is from too long ago,", "# which means the cert is definitely valid by now.", "pass", "not_after_seconds", "=", "_convert_filetime_to_timestamp", "(", "cert_info", ".", "NotAfter", ")", "try", ":", "not_after", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "not_after_seconds", ")", "if", "not_after", "<", "now", ":", "if", "cert_callback", ":", "cert_callback", "(", "Certificate", ".", "load", "(", "data", ")", ",", "'no longer valid'", ")", "continue", "except", "(", "ValueError", ",", "OSError", ")", "as", "e", ":", "# The only reason we would get an exception here is if the", "# expiration time is so far in the future that it can't be", "# used as a timestamp, or it is before 0. If it is very far", "# in the future, the cert is still valid, so we only raise", "# an exception if the timestamp is less than zero.", "if", "not_after_seconds", "<", "0", ":", "message", "=", "e", ".", "args", "[", "0", "]", "+", "' - '", "+", "str_cls", "(", "not_after_seconds", ")", "e", ".", "args", "=", "(", "message", ",", ")", "+", "e", ".", "args", "[", "1", ":", "]", "raise", "e", "trust_oids", "=", "set", "(", ")", "reject_oids", "=", "set", "(", ")", "# Here we grab the extended key usage properties that Windows", "# layers on top of the extended key usage extension that is", "# part of the certificate itself. For highest security, users", "# should only use certificates for the intersection of the two", "# lists of purposes. 
However, many seen to treat the OS trust", "# list as an override.", "to_read", "=", "new", "(", "crypt32", ",", "'DWORD *'", ",", "0", ")", "res", "=", "crypt32", ".", "CertGetEnhancedKeyUsage", "(", "context_pointer", ",", "Crypt32Const", ".", "CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG", ",", "null", "(", ")", ",", "to_read", ")", "# Per the Microsoft documentation, if CRYPT_E_NOT_FOUND is returned", "# from get_error(), it means the certificate is valid for all purposes", "error_code", ",", "_", "=", "get_error", "(", ")", "if", "not", "res", "and", "error_code", "!=", "Crypt32Const", ".", "CRYPT_E_NOT_FOUND", ":", "handle_error", "(", "res", ")", "if", "error_code", "==", "Crypt32Const", ".", "CRYPT_E_NOT_FOUND", ":", "trust_all", "=", "True", "else", ":", "usage_buffer", "=", "buffer_from_bytes", "(", "deref", "(", "to_read", ")", ")", "res", "=", "crypt32", ".", "CertGetEnhancedKeyUsage", "(", "context_pointer", ",", "Crypt32Const", ".", "CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG", ",", "cast", "(", "crypt32", ",", "'CERT_ENHKEY_USAGE *'", ",", "usage_buffer", ")", ",", "to_read", ")", "handle_error", "(", "res", ")", "key_usage_pointer", "=", "struct_from_buffer", "(", "crypt32", ",", "'CERT_ENHKEY_USAGE'", ",", "usage_buffer", ")", "key_usage", "=", "unwrap", "(", "key_usage_pointer", ")", "# Having no enhanced usage properties means a cert is distrusted", "if", "key_usage", ".", "cUsageIdentifier", "==", "0", ":", "if", "cert_callback", ":", "cert_callback", "(", "Certificate", ".", "load", "(", "data", ")", ",", "'explicitly distrusted'", ")", "continue", "oids", "=", "array_from_pointer", "(", "crypt32", ",", "'LPCSTR'", ",", "key_usage", ".", "rgpszUsageIdentifier", ",", "key_usage", ".", "cUsageIdentifier", ")", "for", "oid", "in", "oids", ":", "trust_oids", ".", "add", "(", "oid", ".", "decode", "(", "'ascii'", ")", ")", "cert", "=", "None", "# If the certificate is not under blanket trust, we have to", "# determine what purposes it is rejected for by diffing the", "# set of OIDs from the certificate with the OIDs that are", "# trusted.", "if", "not", "trust_all", ":", "cert", "=", "Certificate", ".", "load", "(", "data", ")", "if", "cert", ".", "extended_key_usage_value", ":", "for", "cert_oid", "in", "cert", ".", "extended_key_usage_value", ":", "oid", "=", "cert_oid", ".", "dotted", "if", "oid", "not", "in", "trust_oids", ":", "reject_oids", ".", "add", "(", "oid", ")", "if", "cert_callback", "and", "not", "callback_only_on_failure", ":", "if", "cert", "is", "None", ":", "cert", "=", "Certificate", ".", "load", "(", "data", ")", "cert_callback", "(", "cert", ",", "None", ")", "certificates", "[", "digest", "]", "=", "(", "data", ",", "trust_oids", ",", "reject_oids", ")", "result", "=", "crypt32", ".", "CertCloseStore", "(", "store_handle", ",", "0", ")", "handle_error", "(", "result", ")", "store_handle", "=", "None", "return", "certificates", ".", "values", "(", ")" ]
40.443114
22.97006
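The extract_from_system record above calls a _convert_filetime_to_timestamp helper on the NotBefore/NotAfter FILETIME structures before comparing them against utcnow(). A minimal sketch of that conversion, assuming the standard FILETIME layout (100-nanosecond intervals since 1601-01-01 UTC); the helper name and the raw high/low DWORD arguments here are illustrative, not the record's actual signature:

# Seconds between the FILETIME epoch (1601-01-01) and the Unix epoch
# (1970-01-01): 369 years including 89 leap days -> 134774 days.
_EPOCH_OFFSET = 11644473600

def filetime_to_timestamp(high, low):
    # A FILETIME counts 100-nanosecond intervals since 1601-01-01 UTC,
    # split across two unsigned 32-bit halves.
    hundred_ns = (high << 32) | low
    return hundred_ns / 10 ** 7 - _EPOCH_OFFSET

# Round trip: the FILETIME value for the Unix epoch maps back to 0.0.
ft = _EPOCH_OFFSET * 10 ** 7
print(filetime_to_timestamp(ft >> 32, ft & 0xFFFFFFFF))  # 0.0

The record then feeds such timestamps to datetime.datetime.fromtimestamp() and, when the conversion fails, treats far-past NotBefore and far-future NotAfter values as still in range, re-raising only for negative NotAfter values.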
def setup_logging(args): """ This sets up the logging. Needs the args to get the log level supplied :param args: The command line arguments """ handler = logging.StreamHandler() handler.setLevel(args.log_level) formatter = logging.Formatter(('%(asctime)s - ' '%(name)s - ' '%(levelname)s - ' '%(message)s')) handler.setFormatter(formatter) LOGGER.addHandler(handler)
[ "def", "setup_logging", "(", "args", ")", ":", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setLevel", "(", "args", ".", "log_level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "(", "'%(asctime)s - '", "'%(name)s - '", "'%(levelname)s - '", "'%(message)s'", ")", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "LOGGER", ".", "addHandler", "(", "handler", ")" ]
33.2
9.466667
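A short usage sketch for the setup_logging record above. The 'demo' logger name and the Namespace-based args object are illustrative stand-ins for the module's real LOGGER global and CLI parser, and the helper body is restated so the snippet runs on its own:

import argparse
import logging

LOGGER = logging.getLogger('demo')  # stand-in for the module's LOGGER

def setup_logging(args):
    # Same steps as the record: attach a level-filtered, formatted
    # StreamHandler to the module-level logger.
    handler = logging.StreamHandler()
    handler.setLevel(args.log_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    LOGGER.addHandler(handler)

# The helper filters only at the handler, so the logger's own level
# must also admit the message for anything to be emitted.
LOGGER.setLevel(logging.DEBUG)
setup_logging(argparse.Namespace(log_level=logging.DEBUG))
LOGGER.debug('logging configured')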
def is_email(self, address, diagnose=False): """Check that an address address conforms to RFCs 5321, 5322 and others. More specifically, see the follow RFCs: * http://tools.ietf.org/html/rfc5321 * http://tools.ietf.org/html/rfc5322 * http://tools.ietf.org/html/rfc4291#section-2.2 * http://tools.ietf.org/html/rfc1123#section-2.1 * http://tools.ietf.org/html/rfc3696) (guidance only) Keyword arguments: address -- address to check. diagnose -- flag to report a diagnosis or a boolean (default False) """ threshold = BaseDiagnosis.CATEGORIES['VALID'] return_status = [ValidDiagnosis()] parse_data = {} # Parse the address into components, character by character raw_length = len(address) context = Context.LOCALPART # Where we are context_stack = [context] # Where we've been context_prior = Context.LOCALPART # Where we just came from token = '' # The current character token_prior = '' # The previous character parse_data[Context.LOCALPART] = '' # The address' components parse_data[Context.DOMAIN] = '' atom_list = { Context.LOCALPART: [''], Context.DOMAIN: [''] } # The address' dot-atoms element_count = 0 element_len = 0 hyphen_flag = False # Hyphen cannot occur at the end of a subdomain end_or_die = False # CFWS can only appear at the end of an element skip = False # Skip flag that simulates i++ crlf_count = -1 # crlf_count = -1 == !isset(crlf_count) for i in _range(raw_length): # Skip simulates the use of ++ operator if skip: skip = False continue token = address[i] token = to_char(token) # Switch to simulate decrementing; needed for FWS repeat = True while repeat: repeat = False # ------------------------------------------------------- # Local part # ------------------------------------------------------- if context == Context.LOCALPART: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # local-part = dot-atom / quoted-string / # obs-local-part # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 1*atext) # # quoted-string = [CFWS] # DQUOTE *([FWS] qcontent) [FWS] DQUOTE # [CFWS] # # obs-local-part = word *("." word) # # word = atom / quoted-string # # atom = [CFWS] 1*atext [CFWS] if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments are OK at the beginning of an element if element_count == 0: return_status.append(CFWSDiagnosis('COMMENT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT elif token == Char.DOT: if element_len == 0: # Another dot, already? Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) else: # The entire local-part can be a quoted string for # RFC 5321. If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if end_or_die: return_status.append( DeprecatedDiagnosis('LOCALPART')) # CFWS & quoted strings are OK again now we're at # the beginning of an element (although they are # obsolete forms) end_or_die = False element_len = 0 element_count += 1 parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART].append('') elif token == Char.DQUOTE: if element_len == 0: # The entire local-part can be a quoted string for # RFC 5321. 
If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if element_count == 0: return_status.append( RFC5321Diagnosis('QUOTEDSTRING')) else: return_status.append( DeprecatedDiagnosis('LOCALPART')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 end_or_die = True context_stack.append(context) context = Context.QUOTEDSTRING else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append(CFWSDiagnosis('FWS')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # @ elif token == Char.AT: # At this point we should have a valid local-part if len(context_stack) != 1: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False if parse_data[Context.LOCALPART] == '': # Fatal error return_status.append( InvalidDiagnosis('NOLOCALPART')) elif element_len == 0: # Fatal error return_status.append(InvalidDiagnosis('DOT_END')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1 # The maximum total length of a user name or other # local-part is 64 octets. elif len(parse_data[Context.LOCALPART]) > 64: return_status.append( RFC5322Diagnosis('LOCAL_TOOLONG')) # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Comments and folding white space # SHOULD NOT be used around the "@" in the addr-spec. # # http://tools.ietf.org/html/rfc2119 # 4. SHOULD NOT This phrase, or the phrase "NOT # RECOMMENDED" mean that there may exist valid # reasons in particular circumstances when the # particular behavior is acceptable or even useful, # but the full implications should be understood and # the case carefully weighed before implementing any # behavior described with this label. elif context_prior in [Context.COMMENT, Context.FWS]: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) # Clear everything down for the domain parsing context = Context.DOMAIN context_stack = [] element_count = 0 element_len = 0 # CFWS can only appear at the end of the element end_or_die = False # atext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" 
/ # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.QUOTEDSTRING: return_status.append( InvalidDiagnosis('ATEXT_AFTER_QS')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False else: context_prior = context o = ord(token) if (o < 33 or o > 126 or o == 10 or token in Char.SPECIALS): return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 # ------------------------------------------------------- # Domain # ------------------------------------------------------- elif context == Context.DOMAIN: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain = dot-atom / domain-literal / obs-domain # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 1*atext) # # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-domain = atom *("." atom) # # atom = [CFWS] 1*atext [CFWS] # # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Mailbox = Local-part # "@" # ( Domain / address-literal ) # # Domain = sub-domain *("." sub-domain) # # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Note: A liberal syntax for the domain portion of # addr-spec is given here. However, the domain portion # contains addressing information specified by and # used in other protocols (e.g., RFC 1034, RFC 1035, # RFC 1123, RFC5321). It is therefore incumbent upon # implementations to conform to the syntax of # addresse for the context in which they are used. # is_email() author's note: it's not clear how to interpret # this in the context of a general address address # validator. The conclusion I have reached is this: # "addressing information" must comply with RFC 5321 (and # in turn RFC 1035), anything that is "semantically # invisible" must comply only with RFC 5322. # Comment if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments at the start of the domain are # deprecated in the text # Comments at the start of a subdomain are # obs-domain # (http://tools.ietf.org/html/rfc5322#section-3.4.1) if element_count == 0: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT # Next dot-atom element elif token == Char.DOT: if element_len == 0: # Another dot, already? Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) elif hyphen_flag: # Previous subdomain ended in a hyphen. Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENEND')) else: # Nowhere in RFC 5321 does it say explicitly that # the domain part of a Mailbox must be a valid # domain according to the DNS standards set out in # RFC 1035, but this *is* implied in several # places. 
For instance, wherever the idea of host # routing is discussed the RFC says that the domain # must be looked up in the DNS. This would be # nonsense unless the domain was designed to be a # valid DNS domain. Hence we must conclude that the # RFC 1035 restriction on label length also applies # to RFC 5321 domains. # # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less if element_len > 63: return_status.append( RFC5322Diagnosis('LABEL_TOOLONG')) # CFWS is OK again now we're at the beginning of an # element (although it may be obsolete CFWS) end_or_die = False element_len = 0 element_count += 1 atom_list[Context.DOMAIN].append('') parse_data[Context.DOMAIN] += token # Domain literal elif token == Char.OPENSQBRACKET: if parse_data[Context.DOMAIN] == '': # Domain literal must be the only component end_or_die = True element_len += 1 context_stack.append(context) context = Context.LITERAL parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token parse_data['literal'] = '' else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if i+1 == raw_length or (to_char(address[i + 1]) != Char.LF): # Fatal error return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: return_status.append(CFWSDiagnosis('FWS')) # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # atext else: # RFC 5322 allows any atext... # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" 
/ # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" # But RFC 5321 only allows letter-digit-hyphen to # comply with DNS rules (RFCs 1034 & 1123) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # sub-domain = Let-dig [Ldh-str] # # Let-dig = ALPHA / DIGIT # # Ldh-str = *( ALPHA / DIGIT / "-" ) Let-dig # if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.LITERAL: return_status.append( InvalidDiagnosis('ATEXT_AFTER_DOMLIT')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False o = ord(token) # Assume this token isn't a hyphen unless we discover # it is hyphen_flag = False if o < 33 or o > 126 or token in Char.SPECIALS: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) elif token == Char.HYPHEN: if element_len == 0: # Hyphens can't be at the beginning of a # subdomain # Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENSTART')) hyphen_flag = True elif not (47 < o < 58 or 64 < o < 91 or 96 < o < 123): # Not an RFC 5321 subdomain, but still OK by RFC # 5322 return_status.append(RFC5322Diagnosis('DOMAIN')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 # ------------------------------------------------------- # Domain literal # ------------------------------------------------------- elif context == Context.LITERAL: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # End of domain literal if token == Char.CLOSESQBRACKET: if (max(return_status) < BaseDiagnosis.CATEGORIES['DEPREC']): # Could be a valid RFC 5321 address literal, so # let's check # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5321#section-4.1.3 # IPv4-address-literal = Snum 3("." Snum) # # IPv6-address-literal = "IPv6:" IPv6-addr # # General-address-literal = Standardized-tag ":" # 1*dcontent # # Standardized-tag = Ldh-str # ; Standardized-tag MUST be # ; specified in a # ; Standards-Track RFC and # ; registered with IANA # # dcontent = %d33-90 / ; Printable US-ASCII # %d94-126 ; excl. "[", "\", "]" # # Snum = 1*3DIGIT # ; representing a decimal integer # ; value in the range 0-255 # # IPv6-addr = IPv6-full / IPv6-comp / # IPv6v4-full / IPv6v4-comp # # IPv6-hex = 1*4HEXDIG # # IPv6-full = IPv6-hex 7(":" IPv6-hex) # # IPv6-comp = [IPv6-hex *5(":" IPv6-hex)] # "::" # [IPv6-hex *5(":" IPv6-hex)] # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 6 groups in addition to # ; the "::" may be present. # # IPv6v4-full = IPv6-hex 5(":" IPv6-hex) ":" # IPv4-address-literal # # IPv6v4-comp = [IPv6-hex *3(":" IPv6-hex)] # "::" # [IPv6-hex *3(":" IPv6-hex) ":"] # IPv4-address-literal # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 4 groups in addition to # ; the "::" and # ; IPv4-address-literal may be # ; present. 
max_groups = 8 index = False address_literal = parse_data['literal'] # Extract IPv4 part from the end of the # address-literal (if there is one) regex = ( r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)" r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" ) match_ip = re.search(regex, address_literal) if match_ip: index = address_literal.rfind( match_ip.group(0)) if index != 0: # Convert IPv4 part to IPv6 format for # further testing address_literal = ( address_literal[0:index] + '0:0') if index == 0 and index is not False: # Nothing there except a valid IPv4 address return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) elif not address_literal.startswith(Char.IPV6TAG): return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) else: ipv6 = address_literal[5:] # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy match_ip = ipv6.split(Char.COLON) grp_count = len(match_ip) index = ipv6.find(Char.DOUBLECOLON) if index == -1: # We need exactly the right number of # groups if grp_count != max_groups: return_status.append( RFC5322Diagnosis('IPV6_GRPCOUNT')) else: if index != ipv6.rfind(Char.DOUBLECOLON): return_status.append( RFC5322Diagnosis('IPV6_2X2XCOLON')) else: if index in [0, len(ipv6) - 2]: # RFC 4291 allows :: at the start # or end of an address with 7 other # groups in addition max_groups += 1 if grp_count > max_groups: return_status.append( RFC5322Diagnosis( 'IPV6_MAXGRPS')) elif grp_count == max_groups: # Eliding a single "::" return_status.append( RFC5321Diagnosis( 'IPV6DEPRECATED')) # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy if (ipv6[0] == Char.COLON and ipv6[1] != Char.COLON): # Address starts with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONSTRT')) elif (ipv6[-1] == Char.COLON and ipv6[-2] != Char.COLON): # Address ends with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONEND')) elif ([re.match(r"^[0-9A-Fa-f]{0,4}$", i) for i in match_ip].count(None) != 0): # Check for unmatched characters return_status.append( RFC5322Diagnosis('IPV6_BADCHAR')) else: return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) else: return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 context_prior = context context = context_stack.pop() elif token == Char.BACKSLASH: return_status.append( RFC5322Diagnosis('DOMLIT_OBSDTEXT')) context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # dtext else: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do # %d12 / ; not include the # %d14-31 / ; carriage return, line # %d127 ; feed, and white space # ; characters o = ord(token) # CR, LF, SP & HTAB have already been parsed above if o > 127 or o == 0 or token == Char.OPENSQBRACKET: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_DTEXT')) break elif o < 33 or o == 127: 
return_status.append( RFC5322Diagnosis('DOMLIT_OBSDTEXT')) parse_data['literal'] += token parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 # ------------------------------------------------------- # Quoted string # ------------------------------------------------------- elif context == Context.QUOTEDSTRING: # http://tools.ietf.org/html/rfc5322#section-3.2.4 # quoted-string = [CFWS] # DQUOTE *([FWS] qcontent) [FWS] DQUOTE # [CFWS] # # qcontent = qtext / quoted-pair # Quoted pair if token == Char.BACKSLASH: context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) # Inside a quoted string, spaces are allow as regular # characters. It's only FWS if we include HTAB or CRLF elif token in [Char.CR, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break # http://tools.ietf.org/html/rfc5322#section-3.2.2 # Runs of FWS, comment, or CFWS that occur between # lexical tokens in a structured header field are # semantically interpreted as a single space # character. # http://tools.ietf.org/html/rfc5322#section-3.2.4 # the CRLF in any FWS/CFWS that appears within the # quoted string [is] semantically "invisible" and # therefore not part of the quoted-string parse_data[Context.LOCALPART] += Char.SP atom_list[Context.LOCALPART][element_count] += Char.SP element_len += 1 return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # End of quoted string elif token == Char.DQUOTE: parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 context_prior = context context = context_stack.pop() # qtext else: # http://tools.ietf.org/html/rfc5322#section-3.2.4 # qtext = %d33 / ; Printable US-ASCII # %d35-91 / ; characters not # %d93-126 / ; including "\" or # obs-qtext ; the quote # ; character # # obs-qtext = obs-NO-WS-CTL # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do # %d12 / ; not include the CR, # %d14-31 / ; LF, and white space # %d127 ; characters o = ord(token) if o > 127 or o == 0 or o == 10: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_QTEXT')) elif o < 32 or o == 127: return_status.append( DeprecatedDiagnosis('QTEXT')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 # ------------------------------------------------------- # Quoted pair # ------------------------------------------------------- elif context == Context.QUOTEDPAIR: # http://tools.ietf.org/html/rfc5322#section-3.2.1 # quoted-pair = ("\" (VCHAR / WSP)) / obs-qp # # VCHAR = %d33-126 ; visible (printing) # ; characters # # WSP = SP / HTAB ; white space # # obs-qp = "\" (%d0 / obs-NO-WS-CTL / LF / CR) # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do not # %d12 / ; include the carriage # %d14-31 / ; return, line feed, and # %d127 ; white space characters # # i.e. obs-qp = "\" (%d0-8, %d10-31 / %d127) o = ord(token) if o > 127: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_QPAIR')) elif (o < 31 and o != 9) or o == 127: # SP & HTAB are allowed return_status.append(DeprecatedDiagnosis('QP')) # At this point we know where this qpair occurred so # we could check to see if the character actually # needed to be quoted at all. 
# http://tools.ietf.org/html/rfc5321#section-4.1.2 # the sending system SHOULD transmit the # form that uses the minimum quoting possible. context_prior = context context = context_stack.pop() # End of qpair token = Char.BACKSLASH + token if context == Context.COMMENT: pass elif context == Context.QUOTEDSTRING: parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 elif context == Context.LITERAL: parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # ------------------------------------------------------- # Comment # ------------------------------------------------------- elif context == Context.COMMENT: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # comment = "(" *([FWS] ccontent) [FWS] ")" # # ccontent = ctext / quoted-pair / comment # Nested comment if token == Char.OPENPARENTHESIS: # Nested comments are OK context_stack.append(context) context = Context.COMMENT # End of comment elif token == Char.CLOSEPARENTHESIS: context_prior = context context = context_stack.pop() # Quoted pair elif token == Char.BACKSLASH: context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # ctext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # ctext = %d33-39 / ; Printable US- # %d42-91 / ; ASCII characters # %d93-126 / ; not including # obs-ctext ; "(", ")", or "\" # # obs-ctext = obs-NO-WS-CTL # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that # %d12 / ; do not include # %d14-31 / ; the CR, LF, and # ; white space # ; characters o = ord(token) if o > 127 or o == 0 or o == 10: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_CTEXT')) break elif o < 32 or o == 127: return_status.append(DeprecatedDiagnosis('CTEXT')) # ------------------------------------------------------- # Folding White Space (FWS) # ------------------------------------------------------- elif context == Context.FWS: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # FWS = ([*WSP CRLF] 1*WSP) / obs-FWS # ; Folding white space # # But note the erratum: # http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908 # In the obsolete syntax, any amount of folding white # space MAY be inserted where the obs-FWS rule is # allowed. This creates the possibility of having two # consecutive "folds" in a line, and therefore the # possibility that a line which makes up a folded header # field could be composed entirely of white space. 
# # obs-FWS = 1*([CRLF] WSP) if token_prior == Char.CR: if token == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_X2')) break if crlf_count != -1: crlf_count += 1 if crlf_count > 1: # Multiple folds = obsolete FWS return_status.append( DeprecatedDiagnosis('FWS')) else: crlf_count = 1 # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append(InvalidDiagnosis('CR_NO_LF')) break elif token in [Char.SP, Char.HTAB]: pass else: if token_prior == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_END')) break if crlf_count != -1: crlf_count = -1 context_prior = context # End of FWS context = context_stack.pop() # Look at this token again in the parent context repeat = True token_prior = token # ------------------------------------------------------- # A context we aren't expecting # ------------------------------------------------------- else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # No point in going on if we've got a fatal error if max(return_status) > BaseDiagnosis.CATEGORIES['RFC5322']: break # Some simple final tests if max(return_status) < BaseDiagnosis.CATEGORIES['RFC5322']: if context == Context.QUOTEDSTRING: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDQUOTEDSTR')) elif context == Context.QUOTEDPAIR: # Fatal error return_status.append(InvalidDiagnosis('BACKSLASHEND')) elif context == Context.COMMENT: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDCOMMENT')) elif context == Context.LITERAL: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDDOMLIT')) elif token == Char.CR: # Fatal error return_status.append(InvalidDiagnosis('FWS_CRLF_END')) elif parse_data[Context.DOMAIN] == '': # Fatal error return_status.append(InvalidDiagnosis('NODOMAIN')) elif element_len == 0: # Fatal error return_status.append(InvalidDiagnosis('DOT_END')) elif hyphen_flag: # Fatal error return_status.append(InvalidDiagnosis('DOMAINHYPHENEND')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2 # The maximum total length of a domain name or number is 255 octets elif len(parse_data[Context.DOMAIN]) > 255: return_status.append(RFC5322Diagnosis('DOMAIN_TOOLONG')) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Forward-path = Path # # Path = "<" [ A-d-l ":" ] Mailbox ">" # # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3 # The maximum total length of a reverse-path or forward-path is # 256 octets (including the punctuation and element separators). # # Thus, even without (obsolete) routing information, the Mailbox # can only be 254 characters long. This is confirmed by this # verified erratum to RFC 3696: # # http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690 # However, there is a restriction in RFC 2821 on the length of an # address in MAIL and RCPT commands of 254 characters. Since # addresses that do not fit in those fields are not normally # useful, the upper limit on address lengths should normally be # considered to be 254. 
elif len(parse_data[Context.LOCALPART] + Char.AT + parse_data[Context.DOMAIN]) > 254: return_status.append(RFC5322Diagnosis('TOOLONG')) # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less elif element_len > 63: return_status.append(RFC5322Diagnosis('LABEL_TOOLONG')) return_status = list(set(return_status)) final_status = max(return_status) if len(return_status) != 1: # Remove redundant ValidDiagnosis return_status.pop(0) parse_data['status'] = return_status if final_status < threshold: final_status = ValidDiagnosis() if diagnose: return final_status else: return final_status < BaseDiagnosis.CATEGORIES['THRESHOLD']
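Before the token listing, a self-contained sketch of one concrete step in the domain-literal branch above: stripping a trailing IPv4 dotted quad from the address literal and substituting '0:0' so the remainder can be checked purely as IPv6 groups. The regex is the one used in the function; the normalize_literal name is illustrative:

import re

# Trailing-IPv4 matcher from the domain-literal check above
# (RFC 5321 section 4.1.3 address-literal forms).
IPV4_TAIL = re.compile(
    r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
    r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")

def normalize_literal(address_literal):
    # Replace a trailing IPv4 part with '0:0' so the rest of the
    # literal can be validated as plain IPv6 groups, mirroring the
    # parser's handling of the IPv6v4-full / IPv6v4-comp forms.
    match = IPV4_TAIL.search(address_literal)
    if match:
        index = address_literal.rfind(match.group(0))
        if index > 0:
            return address_literal[:index] + '0:0'
    return address_literal

print(normalize_literal('IPv6:0:0:0:0:0:0:13.1.68.3'))
# -> IPv6:0:0:0:0:0:0:0:0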
[ "def", "is_email", "(", "self", ",", "address", ",", "diagnose", "=", "False", ")", ":", "threshold", "=", "BaseDiagnosis", ".", "CATEGORIES", "[", "'VALID'", "]", "return_status", "=", "[", "ValidDiagnosis", "(", ")", "]", "parse_data", "=", "{", "}", "# Parse the address into components, character by character", "raw_length", "=", "len", "(", "address", ")", "context", "=", "Context", ".", "LOCALPART", "# Where we are", "context_stack", "=", "[", "context", "]", "# Where we've been", "context_prior", "=", "Context", ".", "LOCALPART", "# Where we just came from", "token", "=", "''", "# The current character", "token_prior", "=", "''", "# The previous character", "parse_data", "[", "Context", ".", "LOCALPART", "]", "=", "''", "# The address' components", "parse_data", "[", "Context", ".", "DOMAIN", "]", "=", "''", "atom_list", "=", "{", "Context", ".", "LOCALPART", ":", "[", "''", "]", ",", "Context", ".", "DOMAIN", ":", "[", "''", "]", "}", "# The address' dot-atoms", "element_count", "=", "0", "element_len", "=", "0", "hyphen_flag", "=", "False", "# Hyphen cannot occur at the end of a subdomain", "end_or_die", "=", "False", "# CFWS can only appear at the end of an element", "skip", "=", "False", "# Skip flag that simulates i++", "crlf_count", "=", "-", "1", "# crlf_count = -1 == !isset(crlf_count)", "for", "i", "in", "_range", "(", "raw_length", ")", ":", "# Skip simulates the use of ++ operator", "if", "skip", ":", "skip", "=", "False", "continue", "token", "=", "address", "[", "i", "]", "token", "=", "to_char", "(", "token", ")", "# Switch to simulate decrementing; needed for FWS", "repeat", "=", "True", "while", "repeat", ":", "repeat", "=", "False", "# -------------------------------------------------------", "# Local part", "# -------------------------------------------------------", "if", "context", "==", "Context", ".", "LOCALPART", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# local-part = dot-atom / quoted-string /", "# obs-local-part", "#", "# dot-atom = [CFWS] dot-atom-text [CFWS]", "#", "# dot-atom-text = 1*atext *(\".\" 1*atext)", "#", "# quoted-string = [CFWS]", "# DQUOTE *([FWS] qcontent) [FWS] DQUOTE", "# [CFWS]", "#", "# obs-local-part = word *(\".\" word)", "#", "# word = atom / quoted-string", "#", "# atom = [CFWS] 1*atext [CFWS]", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "if", "element_len", "==", "0", ":", "# Comments are OK at the beginning of an element", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "# We can't start a comment in the middle of an", "# element, so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "elif", "token", "==", "Char", ".", "DOT", ":", "if", "element_len", "==", "0", ":", "# Another dot, already? Fatal error", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_START'", ")", ")", "else", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CONSECUTIVEDOTS'", ")", ")", "else", ":", "# The entire local-part can be a quoted string for", "# RFC 5321. 
If it's just one atom that is quoted", "# then it's an RFC 5322 obsolete form", "if", "end_or_die", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'LOCALPART'", ")", ")", "# CFWS & quoted strings are OK again now we're at", "# the beginning of an element (although they are", "# obsolete forms)", "end_or_die", "=", "False", "element_len", "=", "0", "element_count", "+=", "1", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", ".", "append", "(", "''", ")", "elif", "token", "==", "Char", ".", "DQUOTE", ":", "if", "element_len", "==", "0", ":", "# The entire local-part can be a quoted string for", "# RFC 5321. If it's just one atom that is quoted", "# then it's an RFC 5322 obsolete form", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'QUOTEDSTRING'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'LOCALPART'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDSTRING", "else", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "if", "element_len", "==", "0", ":", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "# We can't start FWS in the middle of an element,", "# so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# @", "elif", "token", "==", "Char", ".", "AT", ":", "# At this point we should have a valid local-part", "if", "len", "(", "context_stack", ")", "!=", "1", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "if", "parse_data", "[", "Context", ".", "LOCALPART", "]", "==", "''", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'NOLOCALPART'", ")", ")", "elif", "element_len", "==", "0", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_END'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1", "# The maximum total length of a user name or other", "# local-part is 64 octets.", "elif", "len", "(", "parse_data", "[", "Context", ".", "LOCALPART", "]", ")", ">", "64", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LOCAL_TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# Comments and folding white space", "# SHOULD NOT be used around 
the \"@\" in the addr-spec.", "#", "# http://tools.ietf.org/html/rfc2119", "# 4. SHOULD NOT This phrase, or the phrase \"NOT", "# RECOMMENDED\" mean that there may exist valid", "# reasons in particular circumstances when the", "# particular behavior is acceptable or even useful,", "# but the full implications should be understood and", "# the case carefully weighed before implementing any", "# behavior described with this label.", "elif", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "# Clear everything down for the domain parsing", "context", "=", "Context", ".", "DOMAIN", "context_stack", "=", "[", "]", "element_count", "=", "0", "element_len", "=", "0", "# CFWS can only appear at the end of the element", "end_or_die", "=", "False", "# atext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# atext = ALPHA / DIGIT / ; Printable US-ASCII", "# \"!\" / \"#\" / ; characters not", "# \"$\" / \"%\" / ; including specials.", "# \"&\" / \"'\" / ; Used for atoms.", "# \"*\" / \"+\" /", "# \"-\" / \"/\" /", "# \"=\" / \"?\" /", "# \"^\" / \"_\" /", "# \"`\" / \"{\" /", "# \"|\" / \"}\" /", "# \"~\"", "if", "end_or_die", ":", "# We have encountered atext where it is no longer", "# valid", "if", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_CFWS'", ")", ")", "elif", "context_prior", "==", "Context", ".", "QUOTEDSTRING", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_QS'", ")", ")", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "else", ":", "context_prior", "=", "context", "o", "=", "ord", "(", "token", ")", "if", "(", "o", "<", "33", "or", "o", ">", "126", "or", "o", "==", "10", "or", "token", "in", "Char", ".", "SPECIALS", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Domain", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "DOMAIN", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# domain = dot-atom / domain-literal / obs-domain", "#", "# dot-atom = [CFWS] dot-atom-text [CFWS]", "#", "# dot-atom-text = 1*atext *(\".\" 1*atext)", "#", "# domain-literal = [CFWS]", "# \"[\" *([FWS] dtext) [FWS] \"]\"", "# [CFWS]", "#", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-domain = atom *(\".\" atom)", "#", "# atom = [CFWS] 1*atext [CFWS]", "#", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# Mailbox = Local-part", "# \"@\"", "# ( Domain / address-literal )", "#", "# Domain = sub-domain *(\".\" sub-domain)", "#", "# address-literal = \"[\" ( IPv4-address-literal /", "# IPv6-address-literal /", "# General-address-literal ) \"]\"", "# ; See Section 4.1.3", "#", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# Note: A liberal syntax for the domain portion of", "# addr-spec is given here. 
However, the domain portion", "# contains addressing information specified by and", "# used in other protocols (e.g., RFC 1034, RFC 1035,", "# RFC 1123, RFC5321). It is therefore incumbent upon", "# implementations to conform to the syntax of", "# addresse for the context in which they are used.", "# is_email() author's note: it's not clear how to interpret", "# this in the context of a general address address", "# validator. The conclusion I have reached is this:", "# \"addressing information\" must comply with RFC 5321 (and", "# in turn RFC 1035), anything that is \"semantically", "# invisible\" must comply only with RFC 5322.", "# Comment", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "if", "element_len", "==", "0", ":", "# Comments at the start of the domain are", "# deprecated in the text", "# Comments at the start of a subdomain are", "# obs-domain", "# (http://tools.ietf.org/html/rfc5322#section-3.4.1)", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "# We can't start a comment in the middle of an", "# element, so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "# Next dot-atom element", "elif", "token", "==", "Char", ".", "DOT", ":", "if", "element_len", "==", "0", ":", "# Another dot, already? Fatal error", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_START'", ")", ")", "else", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CONSECUTIVEDOTS'", ")", ")", "elif", "hyphen_flag", ":", "# Previous subdomain ended in a hyphen. Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENEND'", ")", ")", "else", ":", "# Nowhere in RFC 5321 does it say explicitly that", "# the domain part of a Mailbox must be a valid", "# domain according to the DNS standards set out in", "# RFC 1035, but this *is* implied in several", "# places. For instance, wherever the idea of host", "# routing is discussed the RFC says that the domain", "# must be looked up in the DNS. This would be", "# nonsense unless the domain was designed to be a", "# valid DNS domain. 
Hence we must conclude that the", "# RFC 1035 restriction on label length also applies", "# to RFC 5321 domains.", "#", "# http://tools.ietf.org/html/rfc1035#section-2.3.4", "# labels 63 octets or less", "if", "element_len", ">", "63", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LABEL_TOOLONG'", ")", ")", "# CFWS is OK again now we're at the beginning of an", "# element (although it may be obsolete CFWS)", "end_or_die", "=", "False", "element_len", "=", "0", "element_count", "+=", "1", "atom_list", "[", "Context", ".", "DOMAIN", "]", ".", "append", "(", "''", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "# Domain literal", "elif", "token", "==", "Char", ".", "OPENSQBRACKET", ":", "if", "parse_data", "[", "Context", ".", "DOMAIN", "]", "==", "''", ":", "# Domain literal must be the only component", "end_or_die", "=", "True", "element_len", "+=", "1", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "LITERAL", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "parse_data", "[", "'literal'", "]", "=", "''", "else", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "i", "+", "1", "==", "raw_length", "or", "(", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "if", "element_len", "==", "0", ":", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "# We can't start FWS in the middle of an element,", "# so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# atext", "else", ":", "# RFC 5322 allows any atext...", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# atext = ALPHA / DIGIT / ; Printable US-ASCII", "# \"!\" / \"#\" / ; characters not", "# \"$\" / \"%\" / ; including specials.", "# \"&\" / \"'\" / ; Used for atoms.", "# \"*\" / \"+\" /", "# \"-\" / \"/\" /", "# \"=\" / \"?\" /", "# \"^\" / \"_\" /", "# \"`\" / \"{\" /", "# \"|\" / \"}\" /", "# \"~\"", "# But RFC 5321 only allows letter-digit-hyphen to", "# comply with DNS rules (RFCs 1034 & 1123)", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# sub-domain = Let-dig [Ldh-str]", "#", "# Let-dig = ALPHA / DIGIT", "#", "# Ldh-str = *( ALPHA / DIGIT / \"-\" ) Let-dig", "#", "if", "end_or_die", ":", "# We have encountered atext where it is no longer", "# valid", "if", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_CFWS'", ")", ")", "elif", "context_prior", "==", "Context", ".", "LITERAL", ":", "return_status", ".", "append", 
"(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_DOMLIT'", ")", ")", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "o", "=", "ord", "(", "token", ")", "# Assume this token isn't a hyphen unless we discover", "# it is", "hyphen_flag", "=", "False", "if", "o", "<", "33", "or", "o", ">", "126", "or", "token", "in", "Char", ".", "SPECIALS", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "elif", "token", "==", "Char", ".", "HYPHEN", ":", "if", "element_len", "==", "0", ":", "# Hyphens can't be at the beginning of a", "# subdomain", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENSTART'", ")", ")", "hyphen_flag", "=", "True", "elif", "not", "(", "47", "<", "o", "<", "58", "or", "64", "<", "o", "<", "91", "or", "96", "<", "o", "<", "123", ")", ":", "# Not an RFC 5321 subdomain, but still OK by RFC", "# 5322", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAIN'", ")", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Domain literal", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "LITERAL", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# domain-literal = [CFWS]", "# \"[\" *([FWS] dtext) [FWS] \"]\"", "# [CFWS]", "#", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-dtext = obs-NO-WS-CTL / quoted-pair", "# End of domain literal", "if", "token", "==", "Char", ".", "CLOSESQBRACKET", ":", "if", "(", "max", "(", "return_status", ")", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'DEPREC'", "]", ")", ":", "# Could be a valid RFC 5321 address literal, so", "# let's check", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# address-literal = \"[\" ( IPv4-address-literal /", "# IPv6-address-literal /", "# General-address-literal ) \"]\"", "# ; See Section 4.1.3", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.3", "# IPv4-address-literal = Snum 3(\".\" Snum)", "#", "# IPv6-address-literal = \"IPv6:\" IPv6-addr", "#", "# General-address-literal = Standardized-tag \":\"", "# 1*dcontent", "#", "# Standardized-tag = Ldh-str", "# ; Standardized-tag MUST be", "# ; specified in a", "# ; Standards-Track RFC and", "# ; registered with IANA", "#", "# dcontent = %d33-90 / ; Printable US-ASCII", "# %d94-126 ; excl. \"[\", \"\\\", \"]\"", "#", "# Snum = 1*3DIGIT", "# ; representing a decimal integer", "# ; value in the range 0-255", "#", "# IPv6-addr = IPv6-full / IPv6-comp /", "# IPv6v4-full / IPv6v4-comp", "#", "# IPv6-hex = 1*4HEXDIG", "#", "# IPv6-full = IPv6-hex 7(\":\" IPv6-hex)", "#", "# IPv6-comp = [IPv6-hex *5(\":\" IPv6-hex)]", "# \"::\"", "# [IPv6-hex *5(\":\" IPv6-hex)]", "# ; The \"::\" represents at least 2", "# ; 16-bit groups of zeros. No more", "# ; than 6 groups in addition to", "# ; the \"::\" may be present.", "#", "# IPv6v4-full = IPv6-hex 5(\":\" IPv6-hex) \":\"", "# IPv4-address-literal", "#", "# IPv6v4-comp = [IPv6-hex *3(\":\" IPv6-hex)]", "# \"::\"", "# [IPv6-hex *3(\":\" IPv6-hex) \":\"]", "# IPv4-address-literal", "# ; The \"::\" represents at least 2", "# ; 16-bit groups of zeros. 
No more", "# ; than 4 groups in addition to", "# ; the \"::\" and", "# ; IPv4-address-literal may be", "# ; present.", "max_groups", "=", "8", "index", "=", "False", "address_literal", "=", "parse_data", "[", "'literal'", "]", "# Extract IPv4 part from the end of the", "# address-literal (if there is one)", "regex", "=", "(", "r\"\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.)\"", "r\"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"", ")", "match_ip", "=", "re", ".", "search", "(", "regex", ",", "address_literal", ")", "if", "match_ip", ":", "index", "=", "address_literal", ".", "rfind", "(", "match_ip", ".", "group", "(", "0", ")", ")", "if", "index", "!=", "0", ":", "# Convert IPv4 part to IPv6 format for", "# further testing", "address_literal", "=", "(", "address_literal", "[", "0", ":", "index", "]", "+", "'0:0'", ")", "if", "index", "==", "0", "and", "index", "is", "not", "False", ":", "# Nothing there except a valid IPv4 address", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'ADDRESSLITERAL'", ")", ")", "elif", "not", "address_literal", ".", "startswith", "(", "Char", ".", "IPV6TAG", ")", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAINLITERAL'", ")", ")", "else", ":", "ipv6", "=", "address_literal", "[", "5", ":", "]", "# Revision 2.7: Daniel Marschall's new IPv6", "# testing strategy", "match_ip", "=", "ipv6", ".", "split", "(", "Char", ".", "COLON", ")", "grp_count", "=", "len", "(", "match_ip", ")", "index", "=", "ipv6", ".", "find", "(", "Char", ".", "DOUBLECOLON", ")", "if", "index", "==", "-", "1", ":", "# We need exactly the right number of", "# groups", "if", "grp_count", "!=", "max_groups", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_GRPCOUNT'", ")", ")", "else", ":", "if", "index", "!=", "ipv6", ".", "rfind", "(", "Char", ".", "DOUBLECOLON", ")", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_2X2XCOLON'", ")", ")", "else", ":", "if", "index", "in", "[", "0", ",", "len", "(", "ipv6", ")", "-", "2", "]", ":", "# RFC 4291 allows :: at the start", "# or end of an address with 7 other", "# groups in addition", "max_groups", "+=", "1", "if", "grp_count", ">", "max_groups", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_MAXGRPS'", ")", ")", "elif", "grp_count", "==", "max_groups", ":", "# Eliding a single \"::\"", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'IPV6DEPRECATED'", ")", ")", "# Revision 2.7: Daniel Marschall's new IPv6", "# testing strategy", "if", "(", "ipv6", "[", "0", "]", "==", "Char", ".", "COLON", "and", "ipv6", "[", "1", "]", "!=", "Char", ".", "COLON", ")", ":", "# Address starts with a single colon", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_COLONSTRT'", ")", ")", "elif", "(", "ipv6", "[", "-", "1", "]", "==", "Char", ".", "COLON", "and", "ipv6", "[", "-", "2", "]", "!=", "Char", ".", "COLON", ")", ":", "# Address ends with a single colon", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_COLONEND'", ")", ")", "elif", "(", "[", "re", ".", "match", "(", "r\"^[0-9A-Fa-f]{0,4}$\"", ",", "i", ")", "for", "i", "in", "match_ip", "]", ".", "count", "(", "None", ")", "!=", "0", ")", ":", "# Check for unmatched characters", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_BADCHAR'", ")", ")", "else", ":", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'ADDRESSLITERAL'", ")", ")", "else", ":", "return_status", ".", "append", "(", 
"RFC5322Diagnosis", "(", "'DOMAINLITERAL'", ")", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "elif", "token", "==", "Char", ".", "BACKSLASH", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMLIT_OBSDTEXT'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# dtext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-dtext = obs-NO-WS-CTL / quoted-pair", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do", "# %d12 / ; not include the", "# %d14-31 / ; carriage return, line", "# %d127 ; feed, and white space", "# ; characters", "o", "=", "ord", "(", "token", ")", "# CR, LF, SP & HTAB have already been parsed above", "if", "o", ">", "127", "or", "o", "==", "0", "or", "token", "==", "Char", ".", "OPENSQBRACKET", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_DTEXT'", ")", ")", "break", "elif", "o", "<", "33", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMLIT_OBSDTEXT'", ")", ")", "parse_data", "[", "'literal'", "]", "+=", "token", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Quoted string", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# quoted-string = [CFWS]", "# DQUOTE *([FWS] qcontent) [FWS] DQUOTE", "# [CFWS]", "#", "# qcontent = qtext / quoted-pair", "# Quoted pair", "if", "token", "==", "Char", ".", "BACKSLASH", ":", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "# Inside a quoted string, spaces are allow as regular", "# characters. 
It's only FWS if we include HTAB or CRLF", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# Runs of FWS, comment, or CFWS that occur between", "# lexical tokens in a structured header field are", "# semantically interpreted as a single space", "# character.", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# the CRLF in any FWS/CFWS that appears within the", "# quoted string [is] semantically \"invisible\" and", "# therefore not part of the quoted-string", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "Char", ".", "SP", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "Char", ".", "SP", "element_len", "+=", "1", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# End of quoted string", "elif", "token", "==", "Char", ".", "DQUOTE", ":", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# qtext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# qtext = %d33 / ; Printable US-ASCII", "# %d35-91 / ; characters not", "# %d93-126 / ; including \"\\\" or", "# obs-qtext ; the quote", "# ; character", "#", "# obs-qtext = obs-NO-WS-CTL", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do", "# %d12 / ; not include the CR,", "# %d14-31 / ; LF, and white space", "# %d127 ; characters", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", "or", "o", "==", "0", "or", "o", "==", "10", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_QTEXT'", ")", ")", "elif", "o", "<", "32", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'QTEXT'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Quoted pair", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "QUOTEDPAIR", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.1", "# quoted-pair = (\"\\\" (VCHAR / WSP)) / obs-qp", "#", "# VCHAR = %d33-126 ; visible (printing)", "# ; characters", "#", "# WSP = SP / HTAB ; white space", "#", "# obs-qp = \"\\\" (%d0 / obs-NO-WS-CTL / LF / CR)", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do not", "# %d12 / ; include the carriage", "# %d14-31 / ; return, line feed, and", "# %d127 ; white space characters", "#", "# i.e. 
obs-qp = \"\\\" (%d0-8, %d10-31 / %d127)", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_QPAIR'", ")", ")", "elif", "(", "o", "<", "31", "and", "o", "!=", "9", ")", "or", "o", "==", "127", ":", "# SP & HTAB are allowed", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'QP'", ")", ")", "# At this point we know where this qpair occurred so", "# we could check to see if the character actually", "# needed to be quoted at all.", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# the sending system SHOULD transmit the", "# form that uses the minimum quoting possible.", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# End of qpair", "token", "=", "Char", ".", "BACKSLASH", "+", "token", "if", "context", "==", "Context", ".", "COMMENT", ":", "pass", "elif", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "# The maximum sizes specified by RFC 5321 are octet", "# counts, so we must include the backslash", "element_len", "+=", "2", "elif", "context", "==", "Context", ".", "LITERAL", ":", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "# The maximum sizes specified by RFC 5321 are octet", "# counts, so we must include the backslash", "element_len", "+=", "2", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "# -------------------------------------------------------", "# Comment", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "COMMENT", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# comment = \"(\" *([FWS] ccontent) [FWS] \")\"", "#", "# ccontent = ctext / quoted-pair / comment", "# Nested comment", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "# Nested comments are OK", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "# End of comment", "elif", "token", "==", "Char", ".", "CLOSEPARENTHESIS", ":", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# Quoted pair", "elif", "token", "==", "Char", ".", "BACKSLASH", ":", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# ctext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# ctext = %d33-39 / ; Printable US-", "# %d42-91 / ; ASCII characters", "# %d93-126 / ; not 
including", "# obs-ctext ; \"(\", \")\", or \"\\\"", "#", "# obs-ctext = obs-NO-WS-CTL", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that", "# %d12 / ; do not include", "# %d14-31 / ; the CR, LF, and", "# ; white space", "# ; characters", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", "or", "o", "==", "0", "or", "o", "==", "10", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_CTEXT'", ")", ")", "break", "elif", "o", "<", "32", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CTEXT'", ")", ")", "# -------------------------------------------------------", "# Folding White Space (FWS)", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "FWS", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# FWS = ([*WSP CRLF] 1*WSP) / obs-FWS", "# ; Folding white space", "#", "# But note the erratum:", "# http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908", "# In the obsolete syntax, any amount of folding white", "# space MAY be inserted where the obs-FWS rule is", "# allowed. This creates the possibility of having two", "# consecutive \"folds\" in a line, and therefore the", "# possibility that a line which makes up a folded header", "# field could be composed entirely of white space.", "#", "# obs-FWS = 1*([CRLF] WSP)", "if", "token_prior", "==", "Char", ".", "CR", ":", "if", "token", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_X2'", ")", ")", "break", "if", "crlf_count", "!=", "-", "1", ":", "crlf_count", "+=", "1", "if", "crlf_count", ">", "1", ":", "# Multiple folds = obsolete FWS", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "crlf_count", "=", "1", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "elif", "token", "in", "[", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "pass", "else", ":", "if", "token_prior", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_END'", ")", ")", "break", "if", "crlf_count", "!=", "-", "1", ":", "crlf_count", "=", "-", "1", "context_prior", "=", "context", "# End of FWS", "context", "=", "context_stack", ".", "pop", "(", ")", "# Look at this token again in the parent context", "repeat", "=", "True", "token_prior", "=", "token", "# -------------------------------------------------------", "# A context we aren't expecting", "# -------------------------------------------------------", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "# No point in going on if we've got a fatal error", "if", "max", "(", "return_status", ")", ">", "BaseDiagnosis", ".", "CATEGORIES", "[", "'RFC5322'", "]", ":", "break", "# Some simple final tests", "if", "max", "(", "return_status", ")", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'RFC5322'", "]", ":", "if", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "# Fatal error", "return_status", 
".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDQUOTEDSTR'", ")", ")", "elif", "context", "==", "Context", ".", "QUOTEDPAIR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'BACKSLASHEND'", ")", ")", "elif", "context", "==", "Context", ".", "COMMENT", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDCOMMENT'", ")", ")", "elif", "context", "==", "Context", ".", "LITERAL", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDDOMLIT'", ")", ")", "elif", "token", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_END'", ")", ")", "elif", "parse_data", "[", "Context", ".", "DOMAIN", "]", "==", "''", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'NODOMAIN'", ")", ")", "elif", "element_len", "==", "0", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_END'", ")", ")", "elif", "hyphen_flag", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENEND'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2", "# The maximum total length of a domain name or number is 255 octets", "elif", "len", "(", "parse_data", "[", "Context", ".", "DOMAIN", "]", ")", ">", "255", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAIN_TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# Forward-path = Path", "#", "# Path = \"<\" [ A-d-l \":\" ] Mailbox \">\"", "#", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3", "# The maximum total length of a reverse-path or forward-path is", "# 256 octets (including the punctuation and element separators).", "#", "# Thus, even without (obsolete) routing information, the Mailbox", "# can only be 254 characters long. This is confirmed by this", "# verified erratum to RFC 3696:", "#", "# http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690", "# However, there is a restriction in RFC 2821 on the length of an", "# address in MAIL and RCPT commands of 254 characters. Since", "# addresses that do not fit in those fields are not normally", "# useful, the upper limit on address lengths should normally be", "# considered to be 254.", "elif", "len", "(", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+", "Char", ".", "AT", "+", "parse_data", "[", "Context", ".", "DOMAIN", "]", ")", ">", "254", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc1035#section-2.3.4", "# labels 63 octets or less", "elif", "element_len", ">", "63", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LABEL_TOOLONG'", ")", ")", "return_status", "=", "list", "(", "set", "(", "return_status", ")", ")", "final_status", "=", "max", "(", "return_status", ")", "if", "len", "(", "return_status", ")", "!=", "1", ":", "# Remove redundant ValidDiagnosis", "return_status", ".", "pop", "(", "0", ")", "parse_data", "[", "'status'", "]", "=", "return_status", "if", "final_status", "<", "threshold", ":", "final_status", "=", "ValidDiagnosis", "(", ")", "if", "diagnose", ":", "return", "final_status", "else", ":", "return", "final_status", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'THRESHOLD'", "]" ]
51.531515
21.415804
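The final checks in the row above enforce the RFC length limits on a parsed address. A minimal standalone sketch of just those limits, assuming the local part and domain have already been extracted (the function name and return convention are illustrative, not part of the original parser):

def rfc_length_ok(local_part, domain):
    # RFC 5321 section 4.5.3.1.2: a domain is at most 255 octets
    if len(domain) > 255:
        return False
    # RFC 3696 erratum 1690: the whole address is at most 254 octets
    if len(local_part) + 1 + len(domain) > 254:
        return False
    # RFC 1035 section 2.3.4: each dot-separated label is at most 63 octets
    return all(len(label) <= 63 for label in domain.split('.'))

assert rfc_length_ok('user', 'example.com')
assert not rfc_length_ok('user', ('a' * 64) + '.com')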
def onesided_cl_to_dlnl(cl): """Compute the delta-loglikelihood value that corresponds to an upper limit of the given confidence level. Parameters ---------- cl : float Confidence level. Returns ------- dlnl : float Delta-loglikelihood value with respect to the maximum of the likelihood function. """ alpha = 1.0 - cl return 0.5 * np.power(np.sqrt(2.) * special.erfinv(1 - 2 * alpha), 2.)
[ "def", "onesided_cl_to_dlnl", "(", "cl", ")", ":", "alpha", "=", "1.0", "-", "cl", "return", "0.5", "*", "np", ".", "power", "(", "np", ".", "sqrt", "(", "2.", ")", "*", "special", ".", "erfinv", "(", "1", "-", "2", "*", "alpha", ")", ",", "2.", ")" ]
26
21.588235
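A quick numerical check of the formula in the row above: for a one-sided 95% upper limit, sqrt(2) * erfinv(1 - 2 * alpha) is the standard-normal quantile z ~ 1.6449, so the offset is 0.5 * z**2 ~ 1.3528. A minimal sketch using the same numpy/scipy imports the source assumes:

import numpy as np
from scipy import special

cl = 0.95
alpha = 1.0 - cl
z = np.sqrt(2.) * special.erfinv(1 - 2 * alpha)  # ~1.6449, the 95% normal quantile
print(0.5 * z ** 2)                              # ~1.3528, the dlnl threshold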
def set_enable(self, on, channel=1): """ channel: 1=OP1, 2=OP2, 3=AUX, ALL=all channels""" if isinstance(channel, str): cmd = "OPALL %d" % int(on) elif isinstance(channel, int): cmd = "OP%d %d" % (channel, int(on)) self.write(cmd)
[ "def", "set_enable", "(", "self", ",", "on", ",", "channel", "=", "1", ")", ":", "if", "isinstance", "(", "channel", ",", "str", ")", ":", "cmd", "=", "\"OPALL %d\"", "%", "int", "(", "on", ")", "elif", "isinstance", "(", "channel", ",", "int", ")", ":", "cmd", "=", "\"OP%d %d\"", "%", "(", "channel", ",", "int", "(", "on", ")", ")", "self", ".", "write", "(", "cmd", ")" ]
40
5.285714
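Because the method above only formats and writes SCPI-style command strings, its behavior can be seen with a stub transport. FakeSupply is illustrative and assumes set_enable is defined at module scope:

class FakeSupply:
    set_enable = set_enable          # reuse the method from the row above
    def write(self, cmd):
        print(cmd)                   # stand-in for the instrument write

psu = FakeSupply()
psu.set_enable(True, channel=2)      # prints: OP2 1
psu.set_enable(False, channel='ALL') # any string selects all channels: OPALL 0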
def _list_directory(self, folder_id=''): ''' a generator method for listing the contents of a directory ''' title = '%s._list_directory' % self.__class__.__name__ # construct default response file_list = [] # construct request kwargs list_kwargs = { 'spaces': self.drive_space, 'fields': 'nextPageToken, files(id, name, parents, mimeType)' } # add query field for parent if folder_id: list_kwargs['q'] = "'%s' in parents" % folder_id # retrieve space id if not self.space_id: self._get_space() # send request page_token = 1 while page_token: try: response = self.drive.list(**list_kwargs).execute() except: raise DriveConnectionError(title) # populate list from response results = response.get('files', []) for file in results: if not folder_id and file.get('parents', [])[0] != self.space_id: pass else: yield file.get('id', ''), file.get('name', ''), file.get('mimeType', '') # get page token page_token = response.get('nextPageToken', None) if page_token: list_kwargs['pageToken'] = page_token return file_list
[ "def", "_list_directory", "(", "self", ",", "folder_id", "=", "''", ")", ":", "title", "=", "'%s._list_directory'", "%", "self", ".", "__class__", ".", "__name__", "# construct default response", "file_list", "=", "[", "]", "# construct request kwargs", "list_kwargs", "=", "{", "'spaces'", ":", "self", ".", "drive_space", ",", "'fields'", ":", "'nextPageToken, files(id, name, parents, mimeType)'", "}", "# add query field for parent", "if", "folder_id", ":", "list_kwargs", "[", "'q'", "]", "=", "\"'%s' in parents\"", "%", "folder_id", "# retrieve space id", "if", "not", "self", ".", "space_id", ":", "self", ".", "_get_space", "(", ")", "# send request", "page_token", "=", "1", "while", "page_token", ":", "try", ":", "response", "=", "self", ".", "drive", ".", "list", "(", "*", "*", "list_kwargs", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "# populate list from response", "results", "=", "response", ".", "get", "(", "'files'", ",", "[", "]", ")", "for", "file", "in", "results", ":", "if", "not", "folder_id", "and", "file", ".", "get", "(", "'parents'", ",", "[", "]", ")", "[", "0", "]", "!=", "self", ".", "space_id", ":", "pass", "else", ":", "yield", "file", ".", "get", "(", "'id'", ",", "''", ")", ",", "file", ".", "get", "(", "'name'", ",", "''", ")", ",", "file", ".", "get", "(", "'mimeType'", ",", "''", ")", "# get page token", "page_token", "=", "response", ".", "get", "(", "'nextPageToken'", ",", "None", ")", "if", "page_token", ":", "list_kwargs", "[", "'pageToken'", "]", "=", "page_token", "return", "file_list" ]
31
21.355556
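The loop above follows the usual Drive v3 page-token pattern: request a page, yield its items, and repeat with nextPageToken until it is absent. The same skeleton in isolation, with fetch as a hypothetical stand-in for the API call:

def paginate(fetch, **list_kwargs):
    while True:
        response = fetch(**list_kwargs)
        for item in response.get('files', []):
            yield item
        token = response.get('nextPageToken')
        if not token:
            break                       # no more pages
        list_kwargs['pageToken'] = token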
def soundex_br(word, max_length=4, zero_pad=True): """Return the SoundexBR encoding of a word. This is a wrapper for :py:meth:`SoundexBR.encode`. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The SoundexBR code Examples -------- >>> soundex_br('Oliveira') 'O416' >>> soundex_br('Almeida') 'A453' >>> soundex_br('Barbosa') 'B612' >>> soundex_br('Araújo') 'A620' >>> soundex_br('Gonçalves') 'G524' >>> soundex_br('Goncalves') 'G524' """ return SoundexBR().encode(word, max_length, zero_pad)
[ "def", "soundex_br", "(", "word", ",", "max_length", "=", "4", ",", "zero_pad", "=", "True", ")", ":", "return", "SoundexBR", "(", ")", ".", "encode", "(", "word", ",", "max_length", ",", "zero_pad", ")" ]
21.527778
22.972222
def prefixed_to_namespaced(C, prefixed_name, namespaces): """for a given prefix:name, return {namespace}name from the given namespaces dict """ if ':' not in prefixed_name: return prefixed_name else: prefix, name = prefixed_name.split(':') namespace = namespaces[prefix] return "{%s}%s" % (namespace, name)
[ "def", "prefixed_to_namespaced", "(", "C", ",", "prefixed_name", ",", "namespaces", ")", ":", "if", "':'", "not", "in", "prefixed_name", ":", "return", "prefixed_name", "else", ":", "prefix", ",", "name", "=", "prefixed_name", ".", "split", "(", "':'", ")", "namespace", "=", "namespaces", "[", "prefix", "]", "return", "\"{%s}%s\"", "%", "(", "namespace", ",", "name", ")" ]
42.888889
8.444444
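Usage of the helper above, assuming it is in scope; its first parameter C (presumably the class object of a classmethod) is unused in the body, so None suffices here:

ns = {'xs': 'http://www.w3.org/2001/XMLSchema'}
print(prefixed_to_namespaced(None, 'xs:element', ns))
# -> {http://www.w3.org/2001/XMLSchema}element
print(prefixed_to_namespaced(None, 'element', ns))
# -> element (no prefix, returned unchanged)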
def list(self, email=None, displayName=None, id=None, orgId=None, max=None, **request_parameters): """List people This method supports Webex Teams's implementation of RFC5988 Web Linking to provide pagination support. It returns a generator container that incrementally yields all people returned by the query. The generator will automatically request additional 'pages' of responses from Webex as needed until all responses have been returned. The container makes the generator safe for reuse. A new API call will be made, using the same parameters that were specified when the generator was created, every time a new iterator is requested from the container. Args: email(basestring): The e-mail address of the person to be found. displayName(basestring): The complete or beginning portion of the displayName to be searched. id(basestring): List people by ID. Accepts up to 85 person IDs separated by commas. orgId(basestring): The organization ID. max(int): Limit the maximum number of items returned from the Webex Teams service per request. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: GeneratorContainer: A GeneratorContainer which, when iterated, yields the people returned by the Webex Teams query. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error. """ check_type(id, basestring) check_type(email, basestring) check_type(displayName, basestring) check_type(orgId, basestring) check_type(max, int) params = dict_from_items_with_values( request_parameters, id=id, email=email, displayName=displayName, orgId=orgId, max=max, ) # API request - get items items = self._session.get_items(API_ENDPOINT, params=params) # Yield person objects created from the returned items JSON objects for item in items: yield self._object_factory(OBJECT_TYPE, item)
[ "def", "list", "(", "self", ",", "email", "=", "None", ",", "displayName", "=", "None", ",", "id", "=", "None", ",", "orgId", "=", "None", ",", "max", "=", "None", ",", "*", "*", "request_parameters", ")", ":", "check_type", "(", "id", ",", "basestring", ")", "check_type", "(", "email", ",", "basestring", ")", "check_type", "(", "displayName", ",", "basestring", ")", "check_type", "(", "orgId", ",", "basestring", ")", "check_type", "(", "max", ",", "int", ")", "params", "=", "dict_from_items_with_values", "(", "request_parameters", ",", "id", "=", "id", ",", "email", "=", "email", ",", "displayName", "=", "displayName", ",", "orgId", "=", "orgId", ",", "max", "=", "max", ",", ")", "# API request - get items", "items", "=", "self", ".", "_session", ".", "get_items", "(", "API_ENDPOINT", ",", "params", "=", "params", ")", "# Yield person objects created from the returned items JSON objects", "for", "item", "in", "items", ":", "yield", "self", ".", "_object_factory", "(", "OBJECT_TYPE", ",", "item", ")" ]
41.607143
23.517857
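The docstring above describes a container that makes the generator safe for reuse by re-issuing the API call on each new iteration. A minimal sketch of that idea; this class is illustrative, not necessarily the SDK's actual implementation:

class GeneratorContainer:
    def __init__(self, generator_function, *args, **kwargs):
        self.generator_function = generator_function
        self.args = args
        self.kwargs = kwargs

    def __iter__(self):
        # A fresh generator, and hence a fresh API call, per iteration
        return self.generator_function(*self.args, **self.kwargs)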
def _mk_context_menu(self, thumbkey, chname, info): """NOTE: currently not used, but left here to be reincorporated at some point. """ menu = Widgets.Menu() item = menu.add_name("Display") item.add_callback('activated', lambda w: self.load_file( thumbkey, chname, info.name, info.path, info.image_future)) menu.add_separator() item = menu.add_name("Remove") item.add_callback('activated', lambda w: self.fv.remove_image_by_name( chname, info.name, impath=info.path)) return menu
[ "def", "_mk_context_menu", "(", "self", ",", "thumbkey", ",", "chname", ",", "info", ")", ":", "menu", "=", "Widgets", ".", "Menu", "(", ")", "item", "=", "menu", ".", "add_name", "(", "\"Display\"", ")", "item", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "load_file", "(", "thumbkey", ",", "chname", ",", "info", ".", "name", ",", "info", ".", "path", ",", "info", ".", "image_future", ")", ")", "menu", ".", "add_separator", "(", ")", "item", "=", "menu", ".", "add_name", "(", "\"Remove\"", ")", "item", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "fv", ".", "remove_image_by_name", "(", "chname", ",", "info", ".", "name", ",", "impath", "=", "info", ".", "path", ")", ")", "return", "menu" ]
40.294118
13
def upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs): ''' upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs) Upload a service version to Opereto :Parameters: * *service_zip_file* (`string`) -- zip file location containing service and service specification * *mode* (`string`) -- production/development (default is production) * *service_version* (`string`) -- Service version * *service_id* (`string`) -- Service Identifier :Keywords args: * *comment* (`string`) -- comment :Example: .. code-block:: python opereto_client.upload_service_version(service_zip_file=zip_action_file+'.zip', mode='production', service_version='111') ''' files = {'service_file': open(service_zip_file,'rb')} url_suffix = '/services/upload/%s'%mode if mode=='production': url_suffix+='/'+service_version if service_id: url_suffix+='/'+service_id if kwargs: url_suffix=url_suffix+'?'+urlencode(kwargs) return self._call_rest_api('post', url_suffix, files=files, error='Failed to upload service version')
[ "def", "upload_service_version", "(", "self", ",", "service_zip_file", ",", "mode", "=", "'production'", ",", "service_version", "=", "'default'", ",", "service_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "files", "=", "{", "'service_file'", ":", "open", "(", "service_zip_file", ",", "'rb'", ")", "}", "url_suffix", "=", "'/services/upload/%s'", "%", "mode", "if", "mode", "==", "'production'", ":", "url_suffix", "+=", "'/'", "+", "service_version", "if", "service_id", ":", "url_suffix", "+=", "'/'", "+", "service_id", "if", "kwargs", ":", "url_suffix", "=", "url_suffix", "+", "'?'", "+", "urlencode", "(", "kwargs", ")", "return", "self", ".", "_call_rest_api", "(", "'post'", ",", "url_suffix", ",", "files", "=", "files", ",", "error", "=", "'Failed to upload service version'", ")" ]
43.166667
31.166667
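The request path assembled above, restated as a pure function so the endpoint shape is easier to see. build_upload_suffix is illustrative, and since indentation is flattened in the row, the exact nesting of the service_id branch is an assumption:

from urllib.parse import urlencode  # py3 spelling of the urlencode used above

def build_upload_suffix(mode='production', service_version='default',
                        service_id=None, **kwargs):
    suffix = '/services/upload/%s' % mode
    if mode == 'production':
        suffix += '/' + service_version
    if service_id:
        suffix += '/' + service_id
    if kwargs:
        suffix += '?' + urlencode(kwargs)
    return suffix

print(build_upload_suffix(service_version='111', comment='initial'))
# -> /services/upload/production/111?comment=initial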
def estimate_markov_model(dtrajs, lag, reversible=True, statdist=None, count_mode='sliding', weights='empirical', sparse=False, connectivity='largest', dt_traj='1 step', maxiter=1000000, maxerr=1e-8, score_method='VAMP2', score_k=10, mincount_connectivity='1/n'): r""" Estimates a Markov model from discrete trajectories Returns a :class:`MaximumLikelihoodMSM` that contains the estimated transition matrix and allows to compute a large number of quantities related to Markov models. Parameters ---------- dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int) discrete trajectories, stored as integer ndarrays (arbitrary size) or a single ndarray for only one trajectory. lag : int lag time at which transitions are counted and the transition matrix is estimated. reversible : bool, optional If true compute reversible MSM, else non-reversible MSM statdist : (M,) ndarray, optional Stationary vector on the full state-space. Transition matrix will be estimated such that statdist is its equilibrium distribution. count_mode : str, optional, default='sliding' mode to obtain count matrices from discrete trajectories. Should be one of: * 'sliding' : A trajectory of length T will have :math:`T-\tau` counts at time indexes .. math:: (0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1) * 'effective' : Uses an estimate of the transition counts that are statistically uncorrelated. Recommended when used with a Bayesian MSM. * 'sample' : A trajectory of length T will have :math:`T/\tau` counts at time indexes .. math:: (0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., (((T/\tau)-1) \tau \rightarrow T) weights : str, optional can be used to re-weight non-equilibrium data to equilibrium. Must be one of the following: * 'empirical': Each trajectory frame counts as one. (default) * 'oom': Each transition is re-weighted using OOM theory, see [11]_. sparse : bool, optional If true compute count matrix, transition matrix and all derived quantities using sparse matrix algebra. In this case python sparse matrices will be returned by the corresponding functions instead of numpy arrays. This behavior is suggested for very large numbers of states (e.g. > 4000) because it is likely to be much more efficient. connectivity : str, optional Connectivity mode. Three methods are intended (currently only 'largest' is implemented) * 'largest' : The active set is the largest reversibly connected set. All estimation will be done on this subset and all quantities (transition matrix, stationary distribution, etc) are only defined on this subset and are correspondingly smaller than the full set of states * 'all' : The active set is the full set of states. Estimation will be conducted on each reversibly connected set separately. That means the transition matrix will decompose into disconnected submatrices, the stationary vector is only defined within subsets, etc. Currently not implemented. * 'none' : The active set is the full set of states. Estimation will be conducted on the full set of states without ensuring connectivity. This only permits nonreversible estimation. Currently not implemented. dt_traj : str, optional Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): * 'fs', 'femtosecond*' * 'ps', 'picosecond*' * 'ns', 'nanosecond*' * 'us', 'microsecond*' * 'ms', 'millisecond*' * 's', 'second*' maxiter : int, optional Optional parameter with reversible = True. maximum number of iterations before the transition matrix estimation method exits maxerr : float, optional Optional parameter with reversible = True. convergence tolerance for transition matrix estimation. This specifies the maximum change of the Euclidean norm of relative stationary probabilities (:math:`x_i = \sum_k x_{ik}`). The relative stationary probability changes :math:`e_i = (x_i^{(1)} - x_i^{(2)})/(x_i^{(1)} + x_i^{(2)})` are used in order to track changes in small probabilities. The Euclidean norm of the change vector, :math:`|e_i|_2`, is compared to maxerr. score_method : str, optional, default='VAMP2' Score to be used with MSM score function. Available scores are based on the variational approach for Markov processes [13]_ [14]_: * 'VAMP1' Sum of singular values of the symmetrized transition matrix [14]_ . If the MSM is reversible, this is equal to the sum of transition matrix eigenvalues, also called Rayleigh quotient [13]_ [15]_ . * 'VAMP2' Sum of squared singular values of the symmetrized transition matrix [14]_ . If the MSM is reversible, this is equal to the kinetic variance [16]_ . score_k : int or None The maximum number of eigenvalues or singular values used in the score. If set to None, all available eigenvalues will be used. mincount_connectivity : float or '1/n' minimum number of counts to consider a connection between two states. Counts lower than that will count zero in the connectivity check and may thus separate the resulting transition matrix. The default evaluates to 1/nstates. Returns ------- msm : :class:`MaximumLikelihoodMSM <pyemma.msm.MaximumLikelihoodMSM>` Estimator object containing the MSM and estimation information. See also -------- MaximumLikelihoodMSM An MSM object that has been estimated from data .. autoclass:: pyemma.msm.estimators.maximum_likelihood_msm.MaximumLikelihoodMSM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.MaximumLikelihoodMSM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.msm.estimators.maximum_likelihood_msm.MaximumLikelihoodMSM :attributes: References ---------- The mathematical theory of Markov (state) model estimation was introduced in [1]_ . Further theoretical developments were made in [2]_ . The term Markov state model was coined in [3]_ . Continuous-time Markov models (Master equation models) were suggested in [4]_. Reversible Markov model estimation was introduced in [5]_ , and further developed in [6]_ [7]_ [9]_ . It was shown in [8]_ that the quality of Markov state models does in fact not depend on memory loss, but rather on where the discretization is suitable to approximate the eigenfunctions of the Markov operator (the 'reaction coordinates'). With a suitable choice of discretization and lag time, MSMs can thus become very accurate. [9]_ introduced a number of methodological improvements and gives a good overview of the methodological basics of Markov state modeling today. [10]_ is a more extensive review book of theory, methods and applications. .. [1] Schuette, C. , A. Fischer, W. Huisinga and P. Deuflhard: A Direct Approach to Conformational Dynamics based on Hybrid Monte Carlo. J. Comput. Phys., 151, 146-168 (1999) .. [2] Swope, W. C., J. W. Pitera and F. Suits: Describing protein folding kinetics by molecular dynamics simulations: 1. Theory J. Phys. Chem. B 108, 6571-6581 (2004) .. [3] Singhal, N., C. D. Snow, V. S. Pande: Using path sampling to build better Markovian state models: Predicting the folding rate and mechanism of a tryptophan zipper beta hairpin. J. Chem. Phys. 121, 415 (2004). .. [4] Sriraman, S., I. G. Kevrekidis and G. Hummer, G. J. Phys. Chem. B 109, 6479-6484 (2005) .. [5] Noe, F.: Probability Distributions of Molecular Observables computed from Markov Models. J. Chem. Phys. 128, 244103 (2008) .. [6] Buchete, N.-V. and Hummer, G.: Coarse master equations for peptide folding dynamics. J. Phys. Chem. B 112, 6057--6069 (2008) .. [7] Bowman, G. R., K. A. Beauchamp, G. Boxer and V. S. Pande: Progress and challenges in the automated construction of Markov state models for full protein systems. J. Chem. Phys. 131, 124101 (2009) .. [8] Sarich, M., F. Noe and C. Schuette: On the approximation quality of Markov state models. SIAM Multiscale Model. Simul. 8, 1154-1177 (2010) .. [9] Prinz, J.-H., H. Wu, M. Sarich, B. Keller, M. Senne, M. Held, J. D. Chodera, C. Schuette and F. Noe: Markov models of molecular kinetics: Generation and Validation J. Chem. Phys. 134, 174105 (2011) .. [10] Bowman, G. R., V. S. Pande and F. Noe: An Introduction to Markov State Models and Their Application to Long Timescale Molecular Simulation. Advances in Experimental Medicine and Biology 797, Springer, Heidelberg (2014) .. [11] Nueske, F., Wu, H., Prinz, J.-H., Wehmeyer, C., Clementi, C. and Noe, F.: Markov State Models from short non-Equilibrium Simulations - Analysis and Correction of Estimation Bias J. Chem. Phys. (submitted) (2017) .. [12] H. Wu and F. Noe: Variational approach for learning Markov processes from time series data (in preparation) .. [13] Noe, F. and F. Nueske: A variational approach to modeling slow processes in stochastic dynamical systems. SIAM Multiscale Model. Simul. 11, 635-655 (2013). .. [14] Wu, H and F. Noe: Variational approach for learning Markov processes from time series data (in preparation) .. [15] McGibbon, R and V. S. Pande: Variational cross-validation of slow dynamical modes in molecular kinetics, J. Chem. Phys. 142, 124105 (2015) .. [16] Noe, F. and C. Clementi: Kinetic distance and kinetic maps from molecular dynamics simulation. J. Chem. Theory Comput. 11, 5002-5011 (2015) Example ------- >>> from pyemma import msm >>> import numpy as np >>> np.set_printoptions(precision=3) >>> dtrajs = [[0,1,2,2,2,2,1,2,2,2,1,0,0,0,0,0,0,0], [0,0,0,0,1,1,2,2,2,2,2,2,2,1,0,0]] # two trajectories >>> mm = msm.estimate_markov_model(dtrajs, 2) Which is the active set of states we are working on? >>> print(mm.active_set) [0 1 2] Show the count matrix >>> print(mm.count_matrix_active) [[ 7. 2. 1.] [ 2. 0. 4.] [ 2. 3. 9.]] Show the estimated transition matrix >>> print(mm.transition_matrix) [[ 0.7 0.167 0.133] [ 0.388 0. 0.612] [ 0.119 0.238 0.643]] Is this model reversible (i.e. does it fulfill detailed balance)? >>> print(mm.is_reversible) True What is the equilibrium distribution of states? >>> print(mm.stationary_distribution) [ 0.393 0.17 0.437] Relaxation timescales? >>> print(mm.timescales()) [ 3.415 1.297] Mean first passage time from state 0 to 2: >>> print(mm.mfpt(0, 2)) # doctest: +ELLIPSIS 9.929... """ # Catch invalid inputs for weights: if isinstance(weights, str): if weights not in ['empirical', 'oom']: raise ValueError("Weights must be either \'empirical\' or \'oom\'") else: raise ValueError("Weights must be either \'empirical\' or \'oom\'") # transition matrix estimator if weights == 'empirical': mlmsm = _ML_MSM(lag=lag, reversible=reversible, statdist_constraint=statdist, count_mode=count_mode, sparse=sparse, connectivity=connectivity, dt_traj=dt_traj, maxiter=maxiter, maxerr=maxerr, score_method=score_method, score_k=score_k, mincount_connectivity=mincount_connectivity) # estimate and return return mlmsm.estimate(dtrajs) elif weights == 'oom': if (statdist is not None) or (maxiter != 1000000) or (maxerr != 1e-8): import warnings warnings.warn("Values for statdist, maxiter or maxerr are ignored if OOM-correction is used.") oom_msm = _OOM_MSM(lag=lag, reversible=reversible, count_mode=count_mode, sparse=sparse, connectivity=connectivity, dt_traj=dt_traj, score_method=score_method, score_k=score_k, mincount_connectivity=mincount_connectivity) # estimate and return return oom_msm.estimate(dtrajs)
[ "def", "estimate_markov_model", "(", "dtrajs", ",", "lag", ",", "reversible", "=", "True", ",", "statdist", "=", "None", ",", "count_mode", "=", "'sliding'", ",", "weights", "=", "'empirical'", ",", "sparse", "=", "False", ",", "connectivity", "=", "'largest'", ",", "dt_traj", "=", "'1 step'", ",", "maxiter", "=", "1000000", ",", "maxerr", "=", "1e-8", ",", "score_method", "=", "'VAMP2'", ",", "score_k", "=", "10", ",", "mincount_connectivity", "=", "'1/n'", ")", ":", "# Catch invalid inputs for weights:", "if", "isinstance", "(", "weights", ",", "str", ")", ":", "if", "weights", "not", "in", "[", "'empirical'", ",", "'oom'", "]", ":", "raise", "ValueError", "(", "\"Weights must be either \\'empirical\\' or \\'oom\\'\"", ")", "else", ":", "raise", "ValueError", "(", "\"Weights must be either \\'empirical\\' or \\'oom\\'\"", ")", "# transition matrix estimator", "if", "weights", "==", "'empirical'", ":", "mlmsm", "=", "_ML_MSM", "(", "lag", "=", "lag", ",", "reversible", "=", "reversible", ",", "statdist_constraint", "=", "statdist", ",", "count_mode", "=", "count_mode", ",", "sparse", "=", "sparse", ",", "connectivity", "=", "connectivity", ",", "dt_traj", "=", "dt_traj", ",", "maxiter", "=", "maxiter", ",", "maxerr", "=", "maxerr", ",", "score_method", "=", "score_method", ",", "score_k", "=", "score_k", ",", "mincount_connectivity", "=", "mincount_connectivity", ")", "# estimate and return", "return", "mlmsm", ".", "estimate", "(", "dtrajs", ")", "elif", "weights", "==", "'oom'", ":", "if", "(", "statdist", "is", "not", "None", ")", "or", "(", "maxiter", "!=", "1000000", ")", "or", "(", "maxerr", "!=", "1e-8", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"Values for statdist, maxiter or maxerr are ignored if OOM-correction is used.\"", ")", "oom_msm", "=", "_OOM_MSM", "(", "lag", "=", "lag", ",", "reversible", "=", "reversible", ",", "count_mode", "=", "count_mode", ",", "sparse", "=", "sparse", ",", "connectivity", "=", "connectivity", ",", "dt_traj", "=", "dt_traj", ",", "score_method", "=", "score_method", ",", "score_k", "=", "score_k", ",", "mincount_connectivity", "=", "mincount_connectivity", ")", "# estimate and return", "return", "oom_msm", ".", "estimate", "(", "dtrajs", ")" ]
42.858553
28.026316
def delete_certificate(self, certificate_id): """Delete a certificate. :param str certificate_id: The certificate id (Required) :returns: void """ api = self._get_api(iam.DeveloperApi) api.delete_certificate(certificate_id) return
[ "def", "delete_certificate", "(", "self", ",", "certificate_id", ")", ":", "api", "=", "self", ".", "_get_api", "(", "iam", ".", "DeveloperApi", ")", "api", ".", "delete_certificate", "(", "certificate_id", ")", "return" ]
31
13.777778
def genExampleStar(binaryLetter='', heirarchy=True): """ generates example star, if binaryLetter is true creates a parent binary object, if heirarchy is true will create a system and link everything up """ starPar = StarParameters() starPar.addParam('age', '7.6') starPar.addParam('magB', '9.8') starPar.addParam('magH', '7.4') starPar.addParam('magI', '7.6') starPar.addParam('magJ', '7.5') starPar.addParam('magK', '7.3') starPar.addParam('magV', '9.0') starPar.addParam('mass', '0.98') starPar.addParam('metallicity', '0.43') starPar.addParam('name', 'Example Star {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)) starPar.addParam('name', 'HD {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)) starPar.addParam('radius', '0.95') starPar.addParam('spectraltype', 'G5') starPar.addParam('temperature', '5370') exampleStar = Star(starPar.params) exampleStar.flags.addFlag('Fake') if heirarchy: if binaryLetter: exampleBinary = genExampleBinary() exampleBinary._addChild(exampleStar) exampleStar.parent = exampleBinary else: exampleSystem = genExampleSystem() exampleSystem._addChild(exampleStar) exampleStar.parent = exampleSystem return exampleStar
[ "def", "genExampleStar", "(", "binaryLetter", "=", "''", ",", "heirarchy", "=", "True", ")", ":", "starPar", "=", "StarParameters", "(", ")", "starPar", ".", "addParam", "(", "'age'", ",", "'7.6'", ")", "starPar", ".", "addParam", "(", "'magB'", ",", "'9.8'", ")", "starPar", ".", "addParam", "(", "'magH'", ",", "'7.4'", ")", "starPar", ".", "addParam", "(", "'magI'", ",", "'7.6'", ")", "starPar", ".", "addParam", "(", "'magJ'", ",", "'7.5'", ")", "starPar", ".", "addParam", "(", "'magK'", ",", "'7.3'", ")", "starPar", ".", "addParam", "(", "'magV'", ",", "'9.0'", ")", "starPar", ".", "addParam", "(", "'mass'", ",", "'0.98'", ")", "starPar", ".", "addParam", "(", "'metallicity'", ",", "'0.43'", ")", "starPar", ".", "addParam", "(", "'name'", ",", "'Example Star {0}{1}'", ".", "format", "(", "ac", ".", "_ExampleSystemCount", ",", "binaryLetter", ")", ")", "starPar", ".", "addParam", "(", "'name'", ",", "'HD {0}{1}'", ".", "format", "(", "ac", ".", "_ExampleSystemCount", ",", "binaryLetter", ")", ")", "starPar", ".", "addParam", "(", "'radius'", ",", "'0.95'", ")", "starPar", ".", "addParam", "(", "'spectraltype'", ",", "'G5'", ")", "starPar", ".", "addParam", "(", "'temperature'", ",", "'5370'", ")", "exampleStar", "=", "Star", "(", "starPar", ".", "params", ")", "exampleStar", ".", "flags", ".", "addFlag", "(", "'Fake'", ")", "if", "heirarchy", ":", "if", "binaryLetter", ":", "exampleBinary", "=", "genExampleBinary", "(", ")", "exampleBinary", ".", "_addChild", "(", "exampleStar", ")", "exampleStar", ".", "parent", "=", "exampleBinary", "else", ":", "exampleSystem", "=", "genExampleSystem", "(", ")", "exampleSystem", ".", "_addChild", "(", "exampleStar", ")", "exampleStar", ".", "parent", "=", "exampleSystem", "return", "exampleStar" ]
37.228571
13.428571
def on_open(self): """ Once the connection is made, start the query off and start an event loop to wait for a signal to stop. Results are yielded within receive(). """ def event_loop(): logger.debug(pformat(self.query.request)) self.send(json.dumps(self.query.request)) while not self.event.is_set(): #print('Waiting around on the socket: %s' % self.gettimeout()) self.event.wait(self.gettimeout()) logger.debug('Event loop terminating.') self.thread = threading.Thread( target=event_loop) self.thread.setDaemon(True) self.thread.start()
[ "def", "on_open", "(", "self", ")", ":", "def", "event_loop", "(", ")", ":", "logger", ".", "debug", "(", "pformat", "(", "self", ".", "query", ".", "request", ")", ")", "self", ".", "send", "(", "json", ".", "dumps", "(", "self", ".", "query", ".", "request", ")", ")", "while", "not", "self", ".", "event", ".", "is_set", "(", ")", ":", "#print('Waiting around on the socket: %s' % self.gettimeout())", "self", ".", "event", ".", "wait", "(", "self", ".", "gettimeout", "(", ")", ")", "logger", ".", "debug", "(", "'Event loop terminating.'", ")", "self", ".", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "event_loop", ")", "self", ".", "thread", ".", "setDaemon", "(", "True", ")", "self", ".", "thread", ".", "start", "(", ")" ]
37.105263
13.421053
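The pattern in the row above, a daemon thread whose loop blocks on a threading.Event so it can be woken and stopped promptly, is useful on its own. A self-contained sketch with illustrative names:

import threading

def start_event_loop(work, stop_event, interval=1.0):
    def loop():
        work()                         # e.g. send the initial query
        while not stop_event.is_set():
            stop_event.wait(interval)  # returns early when stop_event is set

    thread = threading.Thread(target=loop)
    thread.daemon = True               # setDaemon(True) is the older spelling
    thread.start()
    return thread

stop = threading.Event()
t = start_event_loop(lambda: None, stop, interval=0.1)
stop.set()   # signal the loop to terminate
t.join()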
def dftphotom(cfg): """Run the discrete-Fourier-transform photometry algorithm. See the module-level documentation and the output of ``casatask dftphotom --help`` for help. All of the algorithm configuration is specified in the *cfg* argument, which is an instance of :class:`Config`. """ tb = util.tools.table() ms = util.tools.ms() me = util.tools.measures() # Read stuff in. Even if the weight values don't have their # absolute scale set correctly, we can still use them to set the # relative weighting of the data points. # # datacol is (ncorr, nchan, nchunk) # flag is (ncorr, nchan, nchunk) # weight is (ncorr, nchunk) # uvw is (3, nchunk) # time is (nchunk) # axis_info.corr_axis is (ncorr) # axis_info.freq_axis.chan_freq is (nchan, 1) [for now?] # # Note that we apply msselect() again when reading the data because # selectinit() is broken, but the invocation here is good because it # affects the results from ms.range() and friends. if ':' in (cfg.spw or ''): warn('it looks like you are attempting to select channels within one or more spws') warn('this is NOT IMPLEMENTED; I will average over the whole spw instead') ms.open(b(cfg.vis)) totrows = ms.nrow() ms_sels = dict((n, cfg.get(n)) for n in util.msselect_keys if cfg.get(n) is not None) ms.msselect(b(ms_sels)) rangeinfo = ms.range(b'data_desc_id field_id'.split()) ddids = rangeinfo['data_desc_id'] fields = rangeinfo['field_id'] colnames = [cfg.datacol] + 'flag weight time axis_info'.split() rephase = (cfg.rephase is not None) if fields.size != 1: # I feel comfortable making this a fatal error, even if we're # not rephasing. die('selected data should contain precisely one field; got %d', fields.size) if rephase: fieldid = fields[0] tb.open(b(os.path.join(cfg.vis, 'FIELD'))) phdirinfo = tb.getcell(b'PHASE_DIR', fieldid) tb.close() if phdirinfo.shape[1] != 1: die('trying to rephase but target field (#%d) has a ' 'time-variable phase center, which I can\'t handle', fieldid) ra0, dec0 = phdirinfo[:,0] # in radians. # based on intflib/pwflux.py, which was copied from # hex/hex-lib-calcgainerr: dra = cfg.rephase[0] - ra0 dec = cfg.rephase[1] l = np.sin(dra) * np.cos(dec) m = np.sin(dec) * np.cos(dec0) - np.cos(dra) * np.cos(dec) * np.sin(dec0) n = np.sin(dec) * np.sin(dec0) + np.cos(dra) * np.cos(dec) * np.cos(dec0) n -= 1 # makes the work below easier lmn = np.asarray([l, m, n]) colnames.append('uvw') # Also need this although 99% of the time `ddid` and `spwid` are the same tb.open(b(os.path.join(cfg.vis, 'DATA_DESCRIPTION'))) ddspws = np.asarray(tb.getcol(b'SPECTRAL_WINDOW_ID')) tb.close() tbins = {} colnames = b(colnames) for ddindex, ddid in enumerate(ddids): # Starting in CASA 4.6, selectinit(ddid) stopped actually filtering # your data to match the specified DDID! What garbage. Work around # with our own filtering. ms_sels['taql'] = 'DATA_DESC_ID == %d' % ddid ms.msselect(b(ms_sels)) ms.selectinit(ddid) if cfg.polarization is not None: ms.selectpolarization(b(cfg.polarization.split(','))) ms.iterinit(maxrows=4096) ms.iterorigin() while True: cols = ms.getdata(items=colnames) if rephase: # With appropriate spw/DDID selection, `freqs` has shape # (nchan, 1). Convert to m^-1 so we can multiply against UVW # directly. freqs = cols['axis_info']['freq_axis']['chan_freq'] assert freqs.shape[1] == 1, 'internal inconsistency, chan_freq??' freqs = freqs[:,0] * util.INVERSE_C_MS for i in range(cols['time'].size): # all records time = cols['time'][i] # get out of UTC as fast as we can! For some reason # giving 'unit=s' below doesn't do what one might hope it would. # CASA can convert to a variety of timescales; TAI is probably # the safest conversion in terms of being helpful while remaining # close to the fundamental data, but TT is possible and should # be perfectly precise for standard applications. mq = me.epoch(b'utc', b({'value': time / 86400., 'unit': 'd'})) mjdtt = me.measure(b(mq), b'tt')['m0']['value'] tdata = tbins.get(mjdtt, None) if tdata is None: tdata = tbins[mjdtt] = [0., 0., 0., 0., 0] if rephase: uvw = cols['uvw'][:,i] ph = np.exp((0-2j) * np.pi * np.dot(lmn, uvw) * freqs) for j in range(cols['flag'].shape[0]): # all polns # We just average together all polarizations right now! # (Not actively, but passively by just iterating over them.) data = cols[cfg.datacol][j,:,i] flags = cols['flag'][j,:,i] # XXXXX casacore is currently (ca. 2012) broken and # returns the raw weights from the dataset rather than # applying the polarization selection. Fortunately all of # our weights are the same, and you can never fetch more # pol types than the dataset has, so this bit works # despite the bug. w = np.where(~flags)[0] if not w.size: continue # all flagged if rephase: data *= ph d = data[w].mean() # account for flagged parts. 90% sure this is the # right thing to do: wt = cols['weight'][j,i] * w.size / data.size wd = wt * d # note a little bit of a hack here to encode real^2 and # imag^2 separately: wd2 = wt * (d.real**2 + (1j) * d.imag**2) tdata[0] += wd tdata[1] += wd2 tdata[2] += wt tdata[3] += wt**2 tdata[4] += 1 if not ms.iternext(): break ms.reset() # reset selection filter so we can get next DDID ms.close() # Could gain some efficiency by using a better data structure than a dict(). smjd = sorted(six.iterkeys(tbins)) cfg.format.header(cfg) for mjd in smjd: wd, wd2, wt, wt2, n = tbins[mjd] if n < 3: # not enough data for meaningful statistics continue dtmin = 1440 * (mjd - smjd[0]) r_sc = wd.real / wt * cfg.datascale i_sc = wd.imag / wt * cfg.datascale r2_sc = wd2.real / wt * cfg.datascale**2 i2_sc = wd2.imag / wt * cfg.datascale**2 if cfg.believeweights: ru_sc = wt**-0.5 * cfg.datascale iu_sc = wt**-0.5 * cfg.datascale else: rv_sc = r2_sc - r_sc**2 # variance among real/imag msmts iv_sc = i2_sc - i_sc**2 ru_sc = np.sqrt(rv_sc * wt2) / wt # uncert in mean real/img values iu_sc = np.sqrt(iv_sc * wt2) / wt mag = np.sqrt(r_sc**2 + i_sc**2) umag = np.sqrt(r_sc**2 * ru_sc**2 + i_sc**2 * iu_sc**2) / mag cfg.format.row(cfg, mjd, dtmin, r_sc, ru_sc, i_sc, iu_sc, mag, umag, n)
[ "def", "dftphotom", "(", "cfg", ")", ":", "tb", "=", "util", ".", "tools", ".", "table", "(", ")", "ms", "=", "util", ".", "tools", ".", "ms", "(", ")", "me", "=", "util", ".", "tools", ".", "measures", "(", ")", "# Read stuff in. Even if the weight values don't have their", "# absolute scale set correctly, we can still use them to set the", "# relative weighting of the data points.", "#", "# datacol is (ncorr, nchan, nchunk)", "# flag is (ncorr, nchan, nchunk)", "# weight is (ncorr, nchunk)", "# uvw is (3, nchunk)", "# time is (nchunk)", "# axis_info.corr_axis is (ncorr)", "# axis_info.freq_axis.chan_freq is (nchan, 1) [for now?]", "#", "# Note that we apply msselect() again when reading the data because", "# selectinit() is broken, but the invocation here is good because it", "# affects the results from ms.range() and friends.", "if", "':'", "in", "(", "cfg", ".", "spw", "or", "''", ")", ":", "warn", "(", "'it looks like you are attempting to select channels within one or more spws'", ")", "warn", "(", "'this is NOT IMPLEMENTED; I will average over the whole spw instead'", ")", "ms", ".", "open", "(", "b", "(", "cfg", ".", "vis", ")", ")", "totrows", "=", "ms", ".", "nrow", "(", ")", "ms_sels", "=", "dict", "(", "(", "n", ",", "cfg", ".", "get", "(", "n", ")", ")", "for", "n", "in", "util", ".", "msselect_keys", "if", "cfg", ".", "get", "(", "n", ")", "is", "not", "None", ")", "ms", ".", "msselect", "(", "b", "(", "ms_sels", ")", ")", "rangeinfo", "=", "ms", ".", "range", "(", "b'data_desc_id field_id'", ".", "split", "(", ")", ")", "ddids", "=", "rangeinfo", "[", "'data_desc_id'", "]", "fields", "=", "rangeinfo", "[", "'field_id'", "]", "colnames", "=", "[", "cfg", ".", "datacol", "]", "+", "'flag weight time axis_info'", ".", "split", "(", ")", "rephase", "=", "(", "cfg", ".", "rephase", "is", "not", "None", ")", "if", "fields", ".", "size", "!=", "1", ":", "# I feel comfortable making this a fatal error, even if we're", "# not rephasing.", "die", "(", "'selected data should contain precisely one field; got %d'", ",", "fields", ".", "size", ")", "if", "rephase", ":", "fieldid", "=", "fields", "[", "0", "]", "tb", ".", "open", "(", "b", "(", "os", ".", "path", ".", "join", "(", "cfg", ".", "vis", ",", "'FIELD'", ")", ")", ")", "phdirinfo", "=", "tb", ".", "getcell", "(", "b'PHASE_DIR'", ",", "fieldid", ")", "tb", ".", "close", "(", ")", "if", "phdirinfo", ".", "shape", "[", "1", "]", "!=", "1", ":", "die", "(", "'trying to rephase but target field (#%d) has a '", "'time-variable phase center, which I can\\'t handle'", ",", "fieldid", ")", "ra0", ",", "dec0", "=", "phdirinfo", "[", ":", ",", "0", "]", "# in radians.", "# based on intflib/pwflux.py, which was copied from", "# hex/hex-lib-calcgainerr:", "dra", "=", "cfg", ".", "rephase", "[", "0", "]", "-", "ra0", "dec", "=", "cfg", ".", "rephase", "[", "1", "]", "l", "=", "np", ".", "sin", "(", "dra", ")", "*", "np", ".", "cos", "(", "dec", ")", "m", "=", "np", ".", "sin", "(", "dec", ")", "*", "np", ".", "cos", "(", "dec0", ")", "-", "np", ".", "cos", "(", "dra", ")", "*", "np", ".", "cos", "(", "dec", ")", "*", "np", ".", "sin", "(", "dec0", ")", "n", "=", "np", ".", "sin", "(", "dec", ")", "*", "np", ".", "sin", "(", "dec0", ")", "+", "np", ".", "cos", "(", "dra", ")", "*", "np", ".", "cos", "(", "dec", ")", "*", "np", ".", "cos", "(", "dec0", ")", "n", "-=", "1", "# makes the work below easier", "lmn", "=", "np", ".", "asarray", "(", "[", "l", ",", "m", ",", "n", "]", ")", "colnames", ".", "append", "(", "'uvw'", ")", "# Also need 
this although 99% of the time `ddid` and `spwid` are the same", "tb", ".", "open", "(", "b", "(", "os", ".", "path", ".", "join", "(", "cfg", ".", "vis", ",", "'DATA_DESCRIPTION'", ")", ")", ")", "ddspws", "=", "np", ".", "asarray", "(", "tb", ".", "getcol", "(", "b'SPECTRAL_WINDOW_ID'", ")", ")", "tb", ".", "close", "(", ")", "tbins", "=", "{", "}", "colnames", "=", "b", "(", "colnames", ")", "for", "ddindex", ",", "ddid", "in", "enumerate", "(", "ddids", ")", ":", "# Starting in CASA 4.6, selectinit(ddid) stopped actually filtering", "# your data to match the specified DDID! What garbage. Work around", "# with our own filtering.", "ms_sels", "[", "'taql'", "]", "=", "'DATA_DESC_ID == %d'", "%", "ddid", "ms", ".", "msselect", "(", "b", "(", "ms_sels", ")", ")", "ms", ".", "selectinit", "(", "ddid", ")", "if", "cfg", ".", "polarization", "is", "not", "None", ":", "ms", ".", "selectpolarization", "(", "b", "(", "cfg", ".", "polarization", ".", "split", "(", "','", ")", ")", ")", "ms", ".", "iterinit", "(", "maxrows", "=", "4096", ")", "ms", ".", "iterorigin", "(", ")", "while", "True", ":", "cols", "=", "ms", ".", "getdata", "(", "items", "=", "colnames", ")", "if", "rephase", ":", "# With appropriate spw/DDID selection, `freqs` has shape", "# (nchan, 1). Convert to m^-1 so we can multiply against UVW", "# directly.", "freqs", "=", "cols", "[", "'axis_info'", "]", "[", "'freq_axis'", "]", "[", "'chan_freq'", "]", "assert", "freqs", ".", "shape", "[", "1", "]", "==", "1", ",", "'internal inconsistency, chan_freq??'", "freqs", "=", "freqs", "[", ":", ",", "0", "]", "*", "util", ".", "INVERSE_C_MS", "for", "i", "in", "range", "(", "cols", "[", "'time'", "]", ".", "size", ")", ":", "# all records", "time", "=", "cols", "[", "'time'", "]", "[", "i", "]", "# get out of UTC as fast as we can! For some reason", "# giving 'unit=s' below doesn't do what one might hope it would.", "# CASA can convert to a variety of timescales; TAI is probably", "# the safest conversion in terms of being helpful while remaining", "# close to the fundamental data, but TT is possible and should", "# be perfectly precise for standard applications.", "mq", "=", "me", ".", "epoch", "(", "b'utc'", ",", "b", "(", "{", "'value'", ":", "time", "/", "86400.", ",", "'unit'", ":", "'d'", "}", ")", ")", "mjdtt", "=", "me", ".", "measure", "(", "b", "(", "mq", ")", ",", "b'tt'", ")", "[", "'m0'", "]", "[", "'value'", "]", "tdata", "=", "tbins", ".", "get", "(", "mjdtt", ",", "None", ")", "if", "tdata", "is", "None", ":", "tdata", "=", "tbins", "[", "mjdtt", "]", "=", "[", "0.", ",", "0.", ",", "0.", ",", "0.", ",", "0", "]", "if", "rephase", ":", "uvw", "=", "cols", "[", "'uvw'", "]", "[", ":", ",", "i", "]", "ph", "=", "np", ".", "exp", "(", "(", "0", "-", "2j", ")", "*", "np", ".", "pi", "*", "np", ".", "dot", "(", "lmn", ",", "uvw", ")", "*", "freqs", ")", "for", "j", "in", "range", "(", "cols", "[", "'flag'", "]", ".", "shape", "[", "0", "]", ")", ":", "# all polns", "# We just average together all polarizations right now!", "# (Not actively, but passively by just iterating over them.)", "data", "=", "cols", "[", "cfg", ".", "datacol", "]", "[", "j", ",", ":", ",", "i", "]", "flags", "=", "cols", "[", "'flag'", "]", "[", "j", ",", ":", ",", "i", "]", "# XXXXX casacore is currently (ca. 2012) broken and", "# returns the raw weights from the dataset rather than", "# applying the polarization selection. 
Fortunately all of", "# our weights are the same, and you can never fetch more", "# pol types than the dataset has, so this bit works", "# despite the bug.", "w", "=", "np", ".", "where", "(", "~", "flags", ")", "[", "0", "]", "if", "not", "w", ".", "size", ":", "continue", "# all flagged", "if", "rephase", ":", "data", "*=", "ph", "d", "=", "data", "[", "w", "]", ".", "mean", "(", ")", "# account for flagged parts. 90% sure this is the", "# right thing to do:", "wt", "=", "cols", "[", "'weight'", "]", "[", "j", ",", "i", "]", "*", "w", ".", "size", "/", "data", ".", "size", "wd", "=", "wt", "*", "d", "# note a little bit of a hack here to encode real^2 and", "# imag^2 separately:", "wd2", "=", "wt", "*", "(", "d", ".", "real", "**", "2", "+", "(", "1j", ")", "*", "d", ".", "imag", "**", "2", ")", "tdata", "[", "0", "]", "+=", "wd", "tdata", "[", "1", "]", "+=", "wd2", "tdata", "[", "2", "]", "+=", "wt", "tdata", "[", "3", "]", "+=", "wt", "**", "2", "tdata", "[", "4", "]", "+=", "1", "if", "not", "ms", ".", "iternext", "(", ")", ":", "break", "ms", ".", "reset", "(", ")", "# reset selection filter so we can get next DDID", "ms", ".", "close", "(", ")", "# Could gain some efficiency by using a better data structure than a dict().", "smjd", "=", "sorted", "(", "six", ".", "iterkeys", "(", "tbins", ")", ")", "cfg", ".", "format", ".", "header", "(", "cfg", ")", "for", "mjd", "in", "smjd", ":", "wd", ",", "wd2", ",", "wt", ",", "wt2", ",", "n", "=", "tbins", "[", "mjd", "]", "if", "n", "<", "3", ":", "# not enough data for meaningful statistics", "continue", "dtmin", "=", "1440", "*", "(", "mjd", "-", "smjd", "[", "0", "]", ")", "r_sc", "=", "wd", ".", "real", "/", "wt", "*", "cfg", ".", "datascale", "i_sc", "=", "wd", ".", "imag", "/", "wt", "*", "cfg", ".", "datascale", "r2_sc", "=", "wd2", ".", "real", "/", "wt", "*", "cfg", ".", "datascale", "**", "2", "i2_sc", "=", "wd2", ".", "imag", "/", "wt", "*", "cfg", ".", "datascale", "**", "2", "if", "cfg", ".", "believeweights", ":", "ru_sc", "=", "wt", "**", "-", "0.5", "*", "cfg", ".", "datascale", "iu_sc", "=", "wt", "**", "-", "0.5", "*", "cfg", ".", "datascale", "else", ":", "rv_sc", "=", "r2_sc", "-", "r_sc", "**", "2", "# variance among real/imag msmts", "iv_sc", "=", "i2_sc", "-", "i_sc", "**", "2", "ru_sc", "=", "np", ".", "sqrt", "(", "rv_sc", "*", "wt2", ")", "/", "wt", "# uncert in mean real/img values", "iu_sc", "=", "np", ".", "sqrt", "(", "iv_sc", "*", "wt2", ")", "/", "wt", "mag", "=", "np", ".", "sqrt", "(", "r_sc", "**", "2", "+", "i_sc", "**", "2", ")", "umag", "=", "np", ".", "sqrt", "(", "r_sc", "**", "2", "*", "ru_sc", "**", "2", "+", "i_sc", "**", "2", "*", "iu_sc", "**", "2", ")", "/", "mag", "cfg", ".", "format", ".", "row", "(", "cfg", ",", "mjd", ",", "dtmin", ",", "r_sc", ",", "ru_sc", ",", "i_sc", ",", "iu_sc", ",", "mag", ",", "umag", ",", "n", ")" ]
39.421875
21.859375
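A sanity check on the rephasing geometry in the row above: because the code subtracts 1 from n, the vector (l, m, n + 1) is the unit vector toward the new phase center expressed in the frame of the old one, so its norm must come out to 1. The coordinates below are arbitrary test values:

import numpy as np

ra0, dec0 = 1.0, 0.3        # old phase center, radians
ra, dec = 1.0005, 0.3002    # new phase center, radians
dra = ra - ra0
l = np.sin(dra) * np.cos(dec)
m = np.sin(dec) * np.cos(dec0) - np.cos(dra) * np.cos(dec) * np.sin(dec0)
n = np.sin(dec) * np.sin(dec0) + np.cos(dra) * np.cos(dec) * np.cos(dec0) - 1
print(l * l + m * m + (n + 1) ** 2)   # -> 1.0 up to floating-point rounding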
def group_create(auth=None, **kwargs): ''' Create a group CLI Example: .. code-block:: bash salt '*' keystoneng.group_create name=group1 salt '*' keystoneng.group_create name=group2 domain=domain1 description='my group2' ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_group(**kwargs)
[ "def", "group_create", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "keep_name", "=", "True", ",", "*", "*", "kwargs", ")", "return", "cloud", ".", "create_group", "(", "*", "*", "kwargs", ")" ]
27.142857
23.571429
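A hedged sketch of what a kwargs cleaner like _clean_kwargs plausibly does (the real salt helper is not shown in this record, so the semantics below are an assumption): drop None-valued entries and, unless keep_name is set, the reserved 'name' key.

def clean_kwargs(keep_name=False, **kwargs):
    # Assumed behavior: strip Nones; optionally keep the 'name' key.
    cleaned = {k: v for k, v in kwargs.items() if v is not None}
    if not keep_name:
        cleaned.pop('name', None)
    return cleaned

print(clean_kwargs(keep_name=True, name='group1', domain=None))  # {'name': 'group1'}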
def pack(self, packer=default_packer) -> bytes: ''' Args: packer (str or lambda): The vertex attributes to pack. Returns: bytes: The packed vertex data. Examples: .. code-block:: python import ModernGL from ModernGL.ext import obj model = obj.Obj.open('box.obj') # default packer data = model.pack() # same as the default packer data = model.pack('vx vy vz tx ty tz nx ny nz') # pack vertices data = model.pack('vx vy vz') # pack vertices and texture coordinates (xy) data = model.pack('vx vy vz tx ty') # pack vertices and normals data = model.pack('vx vy vz nx ny nz') # pack vertices with padding data = model.pack('vx vy vz 0.0') ''' if isinstance(packer, str): nodes = packer.split() packer = eval(PACKER % (len(nodes), ', '.join(nodes))) result = bytearray() for v, t, n in self.face: vx, vy, vz = self.vert[v - 1] tx, ty, tz = self.text[t - 1] if t is not None else (0.0, 0.0, 0.0) nx, ny, nz = self.norm[n - 1] if n is not None else (0.0, 0.0, 0.0) result += packer(vx, vy, vz, tx, ty, tz, nx, ny, nz) return bytes(result)
[ "def", "pack", "(", "self", ",", "packer", "=", "default_packer", ")", "->", "bytes", ":", "if", "isinstance", "(", "packer", ",", "str", ")", ":", "nodes", "=", "packer", ".", "split", "(", ")", "packer", "=", "eval", "(", "PACKER", "%", "(", "len", "(", "nodes", ")", ",", "', '", ".", "join", "(", "nodes", ")", ")", ")", "result", "=", "bytearray", "(", ")", "for", "v", ",", "t", ",", "n", "in", "self", ".", "face", ":", "vx", ",", "vy", ",", "vz", "=", "self", ".", "vert", "[", "v", "-", "1", "]", "tx", ",", "ty", ",", "tz", "=", "self", ".", "text", "[", "t", "-", "1", "]", "if", "t", "is", "not", "None", "else", "(", "0.0", ",", "0.0", ",", "0.0", ")", "nx", ",", "ny", ",", "nz", "=", "self", ".", "norm", "[", "n", "-", "1", "]", "if", "n", "is", "not", "None", "else", "(", "0.0", ",", "0.0", ",", "0.0", ")", "result", "+=", "packer", "(", "vx", ",", "vy", ",", "vz", ",", "tx", ",", "ty", ",", "tz", ",", "nx", ",", "ny", ",", "nz", ")", "return", "bytes", "(", "result", ")" ]
30.632653
22.428571
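The PACKER template itself is not shown in this record; a plausible stand-in (an assumption, not the module's actual template) builds a struct-packing lambda from the whitespace-separated attribute list, which also makes literal padding tokens like '0.0' work:

import struct

PACKER = 'lambda vx, vy, vz, tx, ty, tz, nx, ny, nz: struct.pack("%df", %s)'

nodes = 'vx vy vz tx ty'.split()
packer = eval(PACKER % (len(nodes), ', '.join(nodes)))

# Pack one vertex: position plus 2D texture coordinates (20 bytes).
print(packer(1.0, 2.0, 3.0, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0))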
def register_result(self, job, skip_sanity_checks=False): """ function to register the result of a job This function is called from HB_master, don't call this from your script. """ if self.is_finished: raise RuntimeError("This HB iteration is finished, you can't register more results!") config_id = job.id config = job.kwargs['config'] budget = job.kwargs['budget'] timestamps = job.timestamps result = job.result exception = job.exception d = self.data[config_id] if not skip_sanity_checks: assert d.config == config, 'Configurations differ!' assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run." assert d.budget == budget, 'Budgets differ (%f != %f)!'%(d.budget, budget) d.time_stamps[budget] = timestamps d.results[budget] = result if (job.result is not None) and np.isfinite(result['loss']): d.status = 'REVIEW' else: d.status = 'CRASHED' d.exceptions[budget] = exception self.num_running -= 1
[ "def", "register_result", "(", "self", ",", "job", ",", "skip_sanity_checks", "=", "False", ")", ":", "if", "self", ".", "is_finished", ":", "raise", "RuntimeError", "(", "\"This HB iteration is finished, you can't register more results!\"", ")", "config_id", "=", "job", ".", "id", "config", "=", "job", ".", "kwargs", "[", "'config'", "]", "budget", "=", "job", ".", "kwargs", "[", "'budget'", "]", "timestamps", "=", "job", ".", "timestamps", "result", "=", "job", ".", "result", "exception", "=", "job", ".", "exception", "d", "=", "self", ".", "data", "[", "config_id", "]", "if", "not", "skip_sanity_checks", ":", "assert", "d", ".", "config", "==", "config", ",", "'Configurations differ!'", "assert", "d", ".", "status", "==", "'RUNNING'", ",", "\"Configuration wasn't scheduled for a run.\"", "assert", "d", ".", "budget", "==", "budget", ",", "'Budgets differ (%f != %f)!'", "%", "(", "self", ".", "data", "[", "config_id", "]", "[", "'budget'", "]", ",", "budget", ")", "d", ".", "time_stamps", "[", "budget", "]", "=", "timestamps", "d", ".", "results", "[", "budget", "]", "=", "result", "if", "(", "not", "job", ".", "result", "is", "None", ")", "and", "np", ".", "isfinite", "(", "result", "[", "'loss'", "]", ")", ":", "d", ".", "status", "=", "'REVIEW'", "else", ":", "d", ".", "status", "=", "'CRASHED'", "d", ".", "exceptions", "[", "budget", "]", "=", "exception", "self", ".", "num_running", "-=", "1" ]
27.885714
22.685714
def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa): """ This is a helper method for get_removals_int_oxid! Args: spec_amts_oxi - a dict of species to their amounts in the structure oxid_el - the element to oxidize oxid_els - the full list of elements that might be oxidized numa - a running set of numbers of A cation at integer oxidation steps Returns: a set of numbers A; steps for oxidizing oxid_el first, then the other oxid_els in this list """ # If Mn is the oxid_el, we have a mixture of Mn2+, Mn3+, determine the minimum oxidation state for Mn #this is the state we want to oxidize! oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol]) oxid_new = math.floor(oxid_old + 1) #if this is not a valid solution, break out of here and don't add anything to the list if oxid_new > oxid_el.max_oxidation_state: return numa #update the spec_amts_oxi map to reflect that the oxidation took place spec_old = Specie(oxid_el.symbol, oxid_old) spec_new = Specie(oxid_el.symbol, oxid_new) specamt = spec_amts_oxi[spec_old] spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old} spec_amts_oxi[spec_new] = specamt spec_amts_oxi = Composition(spec_amts_oxi) #determine the amount of cation A in the structure needed for charge balance and add it to the list oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if spec.symbol not in self.cation.symbol]) a = max(0, -oxi_noA / self.cation_charge) numa = numa.union({a}) #recursively try the other oxidation states if a == 0: return numa else: for oxid_el in oxid_els: numa = numa.union( self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa)) return numa
[ "def", "_get_int_removals_helper", "(", "self", ",", "spec_amts_oxi", ",", "oxid_el", ",", "oxid_els", ",", "numa", ")", ":", "# If Mn is the oxid_el, we have a mixture of Mn2+, Mn3+, determine the minimum oxidation state for Mn", "#this is the state we want to oxidize!", "oxid_old", "=", "min", "(", "[", "spec", ".", "oxi_state", "for", "spec", "in", "spec_amts_oxi", "if", "spec", ".", "symbol", "==", "oxid_el", ".", "symbol", "]", ")", "oxid_new", "=", "math", ".", "floor", "(", "oxid_old", "+", "1", ")", "#if this is not a valid solution, break out of here and don't add anything to the list", "if", "oxid_new", ">", "oxid_el", ".", "max_oxidation_state", ":", "return", "numa", "#update the spec_amts_oxi map to reflect that the oxidation took place", "spec_old", "=", "Specie", "(", "oxid_el", ".", "symbol", ",", "oxid_old", ")", "spec_new", "=", "Specie", "(", "oxid_el", ".", "symbol", ",", "oxid_new", ")", "specamt", "=", "spec_amts_oxi", "[", "spec_old", "]", "spec_amts_oxi", "=", "{", "sp", ":", "amt", "for", "sp", ",", "amt", "in", "spec_amts_oxi", ".", "items", "(", ")", "if", "sp", "!=", "spec_old", "}", "spec_amts_oxi", "[", "spec_new", "]", "=", "specamt", "spec_amts_oxi", "=", "Composition", "(", "spec_amts_oxi", ")", "#determine the amount of cation A in the structure needed for charge balance and add it to the list", "oxi_noA", "=", "sum", "(", "[", "spec", ".", "oxi_state", "*", "spec_amts_oxi", "[", "spec", "]", "for", "spec", "in", "spec_amts_oxi", "if", "spec", ".", "symbol", "not", "in", "self", ".", "cation", ".", "symbol", "]", ")", "a", "=", "max", "(", "0", ",", "-", "oxi_noA", "/", "self", ".", "cation_charge", ")", "numa", "=", "numa", ".", "union", "(", "{", "a", "}", ")", "#recursively try the other oxidation states", "if", "a", "==", "0", ":", "return", "numa", "else", ":", "for", "oxid_el", "in", "oxid_els", ":", "numa", "=", "numa", ".", "union", "(", "self", ".", "_get_int_removals_helper", "(", "spec_amts_oxi", ".", "copy", "(", ")", ",", "oxid_el", ",", "oxid_els", ",", "numa", ")", ")", "return", "numa" ]
47.953488
26.55814
def is_allowed(self, name_or_class, mask): # pragma: no cover """Return True if a new connection is allowed""" if isinstance(name_or_class, type): name = name_or_class.type else: name = name_or_class info = self.connections[name] limit = self.config[name + '_limit'] if limit and info['total'] >= limit: msg = ( "Sorry, there is too much DCC %s active. Please try again " "later.") % name.upper() self.bot.notice(mask, msg) return False if mask not in info['masks']: return True limit = self.config[name + '_user_limit'] if limit and info['masks'][mask] >= limit: msg = ( "Sorry, you have too many DCC %s active. Close the other " "connection(s) or wait a few seconds and try again." ) % name.upper() self.bot.notice(mask, msg) return False return True
[ "def", "is_allowed", "(", "self", ",", "name_or_class", ",", "mask", ")", ":", "# pragma: no cover", "if", "isinstance", "(", "name_or_class", ",", "type", ")", ":", "name", "=", "name_or_class", ".", "type", "else", ":", "name", "=", "name_or_class", "info", "=", "self", ".", "connections", "[", "name", "]", "limit", "=", "self", ".", "config", "[", "name", "+", "'_limit'", "]", "if", "limit", "and", "info", "[", "'total'", "]", ">=", "limit", ":", "msg", "=", "(", "\"Sorry, there is too much DCC %s active. Please try again \"", "\"later.\"", ")", "%", "name", ".", "upper", "(", ")", "self", ".", "bot", ".", "notice", "(", "mask", ",", "msg", ")", "return", "False", "if", "mask", "not", "in", "info", "[", "'masks'", "]", ":", "return", "True", "limit", "=", "self", ".", "config", "[", "name", "+", "'_user_limit'", "]", "if", "limit", "and", "info", "[", "'masks'", "]", "[", "mask", "]", ">=", "limit", ":", "msg", "=", "(", "\"Sorry, you have too many DCC %s active. Close the other \"", "\"connection(s) or wait a few seconds and try again.\"", ")", "%", "name", ".", "upper", "(", ")", "self", ".", "bot", ".", "notice", "(", "mask", ",", "msg", ")", "return", "False", "return", "True" ]
39.72
12.84
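Self-contained sketch of the limiting logic above, with the bot/notice plumbing omitted; 'info' mirrors the assumed shape of self.connections[name]:

def is_allowed(info, mask, total_limit, user_limit):
    if total_limit and info['total'] >= total_limit:
        return False  # too many connections overall
    if user_limit and info['masks'].get(mask, 0) >= user_limit:
        return False  # too many for this user mask
    return True

info = {'total': 2, 'masks': {'nick!user@host': 1}}
print(is_allowed(info, 'nick!user@host', total_limit=3, user_limit=1))  # False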
def load(self, df, centerings): """ A moderately mind-bendy meta-method which abstracts the internals of individual projections' load procedures. Parameters ---------- proj : geoplot.crs object instance A disguised reference to ``self``. df : GeoDataFrame The GeoDataFrame which has been passed as input to the plotter at the top level. This data is needed to calculate reasonable centering variables in cases in which the user does not already provide them; which is, incidentally, the reason behind all of this funny twice-instantiation loading in the first place. centerings : dict A dictionary containing names and centering methods. Certain projections have certain centering parameters whilst others lack them. For example, the geospatial projection contains both ``central_longitude`` and ``central_latitude`` instance parameters, which together control the center of the plot, while the North Pole Stereo projection has only a ``central_longitude`` instance parameter, implying that latitude is fixed (as indeed it is, as this projection is centered on the North Pole!). A top-level centerings method is provided in each of the ``geoplot`` top-level plot functions; each of the projection wrapper classes defined here in turn selects the functions from this list relevant to this particular instance and passes them to the ``_generic_load`` method here. We then in turn execute these functions to get defaults for our ``df`` and pass them off to our output ``cartopy.crs`` instance. Returns ------- crs : ``cartopy.crs`` object instance Returns a ``cartopy.crs`` object instance whose appropriate instance variables have been set to reasonable defaults wherever not already provided by the user. """ centering_variables = dict() if not df.empty and df.geometry.notna().any(): for key, func in centerings.items(): centering_variables[key] = func(df) return getattr(ccrs, self.__class__.__name__)(**{**centering_variables, **self.args})
[ "def", "load", "(", "self", ",", "df", ",", "centerings", ")", ":", "centering_variables", "=", "dict", "(", ")", "if", "not", "df", ".", "empty", "and", "df", ".", "geometry", ".", "notna", "(", ")", ".", "any", "(", ")", ":", "for", "key", ",", "func", "in", "centerings", ".", "items", "(", ")", ":", "centering_variables", "[", "key", "]", "=", "func", "(", "df", ")", "return", "getattr", "(", "ccrs", ",", "self", ".", "__class__", ".", "__name__", ")", "(", "*", "*", "{", "*", "*", "centering_variables", ",", "*", "*", "self", ".", "args", "}", ")" ]
60.513514
38.405405
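Minimal sketch of the centering computation: each centering function is applied to the data and the results become projection defaults, with user-supplied args winning. Plain (x, y) tuples stand in for the GeoDataFrame here:

def load(points, centerings, **user_args):
    computed = {key: func(points) for key, func in centerings.items()}
    computed.update(user_args)  # explicit user values override the defaults
    return computed

centerings = {
    'central_longitude': lambda pts: sum(x for x, _ in pts) / len(pts),
    'central_latitude': lambda pts: sum(y for _, y in pts) / len(pts),
}
print(load([(0.0, 10.0), (20.0, 30.0)], centerings))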
def list_namespaced_ingress(self, namespace, **kwargs): """ list or watch objects of kind Ingress This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_ingress(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: NetworkingV1beta1IngressList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_ingress_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_ingress_with_http_info(namespace, **kwargs) return data
[ "def", "list_namespaced_ingress", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_namespaced_ingress_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_namespaced_ingress_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
166.678571
136.392857
def df_to_dat(net, df, define_cat_colors=False): ''' This is always run when data is loaded. ''' from . import categories # check if df has unique values df['mat'] = make_unique_labels.main(net, df['mat']) net.dat['mat'] = df['mat'].values net.dat['nodes']['row'] = df['mat'].index.tolist() net.dat['nodes']['col'] = df['mat'].columns.tolist() for inst_rc in ['row', 'col']: if type(net.dat['nodes'][inst_rc][0]) is tuple: # get the number of categories from the length of the tuple # subtract 1 because the name is the first element of the tuple num_cat = len(net.dat['nodes'][inst_rc][0]) - 1 net.dat['node_info'][inst_rc]['full_names'] = net.dat['nodes']\ [inst_rc] for inst_rcat in range(num_cat): net.dat['node_info'][inst_rc]['cat-' + str(inst_rcat)] = \ [i[inst_rcat + 1] for i in net.dat['nodes'][inst_rc]] net.dat['nodes'][inst_rc] = [i[0] for i in net.dat['nodes'][inst_rc]] if 'mat_up' in df: net.dat['mat_up'] = df['mat_up'].values net.dat['mat_dn'] = df['mat_dn'].values if 'mat_orig' in df: net.dat['mat_orig'] = df['mat_orig'].values categories.dict_cat(net, define_cat_colors=define_cat_colors)
[ "def", "df_to_dat", "(", "net", ",", "df", ",", "define_cat_colors", "=", "False", ")", ":", "from", ".", "import", "categories", "# check if df has unique values", "df", "[", "'mat'", "]", "=", "make_unique_labels", ".", "main", "(", "net", ",", "df", "[", "'mat'", "]", ")", "net", ".", "dat", "[", "'mat'", "]", "=", "df", "[", "'mat'", "]", ".", "values", "net", ".", "dat", "[", "'nodes'", "]", "[", "'row'", "]", "=", "df", "[", "'mat'", "]", ".", "index", ".", "tolist", "(", ")", "net", ".", "dat", "[", "'nodes'", "]", "[", "'col'", "]", "=", "df", "[", "'mat'", "]", ".", "columns", ".", "tolist", "(", ")", "for", "inst_rc", "in", "[", "'row'", ",", "'col'", "]", ":", "if", "type", "(", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "[", "0", "]", ")", "is", "tuple", ":", "# get the number of categories from the length of the tuple", "# subtract 1 because the name is the first element of the tuple", "num_cat", "=", "len", "(", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "[", "0", "]", ")", "-", "1", "net", ".", "dat", "[", "'node_info'", "]", "[", "inst_rc", "]", "[", "'full_names'", "]", "=", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "for", "inst_rcat", "in", "range", "(", "num_cat", ")", ":", "net", ".", "dat", "[", "'node_info'", "]", "[", "inst_rc", "]", "[", "'cat-'", "+", "str", "(", "inst_rcat", ")", "]", "=", "[", "i", "[", "inst_rcat", "+", "1", "]", "for", "i", "in", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "]", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "=", "[", "i", "[", "0", "]", "for", "i", "in", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "]", "if", "'mat_up'", "in", "df", ":", "net", ".", "dat", "[", "'mat_up'", "]", "=", "df", "[", "'mat_up'", "]", ".", "values", "net", ".", "dat", "[", "'mat_dn'", "]", "=", "df", "[", "'mat_dn'", "]", ".", "values", "if", "'mat_orig'", "in", "df", ":", "net", ".", "dat", "[", "'mat_orig'", "]", "=", "df", "[", "'mat_orig'", "]", ".", "values", "categories", ".", "dict_cat", "(", "net", ",", "define_cat_colors", "=", "define_cat_colors", ")" ]
32.162162
22.702703
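Runnable illustration of the tuple-label handling above: when row labels are tuples, the first element is taken as the name and the remaining elements become category columns:

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]],
                  index=[('gene-A', 'type: x'), ('gene-B', 'type: y')],
                  columns=['s1', 's2'])

labels = df.index.tolist()
num_cat = len(labels[0]) - 1
names = [t[0] for t in labels]  # plain row names
cats = {'cat-%d' % i: [t[i + 1] for t in labels] for i in range(num_cat)}
print(names, cats)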
def p_scalar__indented_flow(self, p): """ scalar : INDENT scalar_group DEDENT """ scalar_group = '\n'.join(p[2]) folded_scalar = fold(dedent(scalar_group)) p[0] = ScalarDispatch(folded_scalar, cast='str')
[ "def", "p_scalar__indented_flow", "(", "self", ",", "p", ")", ":", "scalar_group", "=", "'\\n'", ".", "join", "(", "p", "[", "2", "]", ")", "folded_scalar", "=", "fold", "(", "dedent", "(", "scalar_group", ")", ")", "p", "[", "0", "]", "=", "ScalarDispatch", "(", "folded_scalar", ",", "cast", "=", "'str'", ")" ]
35.285714
5
def get_object(self, path): """Get single object.""" key = self.native_container.get_key(path) return self.obj_cls.from_key(self, key)
[ "def", "get_object", "(", "self", ",", "path", ")", ":", "key", "=", "self", ".", "native_container", ".", "get_key", "(", "path", ")", "return", "self", ".", "obj_cls", ".", "from_key", "(", "self", ",", "key", ")" ]
38.75
7.25
def scan_ipaddr(ipaddr, line, project, split_path, apikey): """ If an IP Address is found, scan it """ logger.info('Found what I believe is an IP Address: %s', line.strip()) logger.info('File %s. Parsed IP Address: %s', split_path, ipaddr) with open(reports_dir + "ips-" + project + ".log", "a") as gate_report: gate_report.write('File {} contains what I believe is an IP Address: {}\n'.format(split_path, ipaddr)) v_api = virus_total.VirusTotal() scan_ip = v_api.send_ip(ipaddr, apikey) response_code = scan_ip['response_code'] verbose_msg = scan_ip['verbose_msg'] urls = scan_ip['detected_urls'] with open(reports_dir + "ips-" + project + ".log", "a") as gate_report: if urls: logger.error('%s has been known to resolve to the following malicious urls:', ipaddr) gate_report.write('{} has been known to resolve to the following malicious urls:\n'.format(ipaddr)) for url in urls: logger.info('%s on date: %s', url['url'], url['scan_date']) gate_report.write('{} on {}\n'.format(url['url'], url['scan_date'])) sleep(0.2) else: logger.info('No malicious DNS history found for: %s', ipaddr) gate_report.write('No malicious DNS history found for: {}\n'.format(ipaddr))
[ "def", "scan_ipaddr", "(", "ipaddr", ",", "line", ",", "project", ",", "split_path", ",", "apikey", ")", ":", "logger", ".", "info", "(", "'Found what I believe is an IP Address: %s'", ",", "line", ".", "strip", "(", ")", ")", "logger", ".", "info", "(", "'File %s. Parsed IP Address: %s'", ",", "split_path", ",", "ipaddr", ")", "with", "open", "(", "reports_dir", "+", "\"ips-\"", "+", "project", "+", "\".log\"", ",", "\"a\"", ")", "as", "gate_report", ":", "gate_report", ".", "write", "(", "'File {} contains what I believe is an IP Address: {}\\n'", ".", "format", "(", "split_path", ",", "ipaddr", ")", ")", "v_api", "=", "virus_total", ".", "VirusTotal", "(", ")", "scan_ip", "=", "v_api", ".", "send_ip", "(", "ipaddr", ",", "apikey", ")", "response_code", "=", "scan_ip", "[", "'response_code'", "]", "verbose_msg", "=", "scan_ip", "[", "'verbose_msg'", "]", "urls", "=", "scan_ip", "[", "'detected_urls'", "]", "with", "open", "(", "reports_dir", "+", "\"ips-\"", "+", "project", "+", "\".log\"", ",", "\"a\"", ")", "as", "gate_report", ":", "if", "urls", ":", "logger", ".", "error", "(", "'%s has been known to resolve to the following malicious urls:'", ",", "ipaddr", ")", "gate_report", ".", "write", "(", "'{} has been known to resolve to the following malicious urls:\\n'", ".", "format", "(", "ipaddr", ")", ")", "for", "url", "in", "urls", ":", "logger", ".", "info", "(", "'%s on date: %s'", ",", "url", "[", "'url'", "]", ",", "url", "[", "'scan_date'", "]", ")", "gate_report", ".", "write", "(", "'{} on {}\\n'", ".", "format", "(", "url", "[", "'url'", "]", ",", "url", "[", "'scan_date'", "]", ")", ")", "sleep", "(", "0.2", ")", "else", ":", "logger", ".", "info", "(", "'No malicious DNS history found for: %s'", ",", "ipaddr", ")", "gate_report", ".", "write", "(", "'No malicious DNS history found for: {}\\n'", ".", "format", "(", "ipaddr", ")", ")" ]
52.92
25.8
def data_indicators(self, indicators, entity_count): """Process Indicator data.""" data = [] # process indicator objects for xid, indicator_data in list(indicators.items()): entity_count += 1 if isinstance(indicator_data, dict): data.append(indicator_data) else: data.append(indicator_data.data) del indicators[xid] if entity_count >= self._batch_max_chunk: break return data, entity_count
[ "def", "data_indicators", "(", "self", ",", "indicators", ",", "entity_count", ")", ":", "data", "=", "[", "]", "# process indicator objects", "for", "xid", ",", "indicator_data", "in", "indicators", ".", "items", "(", ")", ":", "entity_count", "+=", "1", "if", "isinstance", "(", "indicator_data", ",", "dict", ")", ":", "data", ".", "append", "(", "indicator_data", ")", "else", ":", "data", ".", "append", "(", "indicator_data", ".", "data", ")", "del", "indicators", "[", "xid", "]", "if", "entity_count", ">=", "self", ".", "_batch_max_chunk", ":", "break", "return", "data", ",", "entity_count" ]
37
11.071429
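Self-contained sketch of the chunked-drain pattern above: pull values out of a dict in fixed-size batches, deleting as you go. Iterating over list(d.items()) is what keeps the deletion safe in Python 3:

def drain_chunk(d, max_chunk):
    chunk = []
    for key, value in list(d.items()):
        chunk.append(value)
        del d[key]
        if len(chunk) >= max_chunk:
            break
    return chunk

pending = {'a': 1, 'b': 2, 'c': 3}
print(drain_chunk(pending, 2), pending)  # [1, 2] {'c': 3}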
def _binary_sample(image, label, n_samples_per_label, label_count): """ Sample `n_samples_per_label` points from the binary mask corresponding to `label` Randomly sample `n_samples_per_label` points from the binary mask corresponding to `label`. Sampling with replacement is used if the required `n_samples_per_label` is larger than the available `label_count` :param image: Input 2D raster label image :type image: uint8 numpy array :param label: Scalar value of label to consider :type label: uint8 :param n_samples_per_label: Number of points to sample from the binary mask :type n_samples_per_label: uint32 :param label_count: Number of points available for `label` :type label_count: uint32 :return: Row indices of samples, column indices of samples """ h_idx, w_idx = np.where(image == label) rand_idx = np.random.choice(h_idx.size, size=n_samples_per_label, replace=label_count < n_samples_per_label) return h_idx[rand_idx], w_idx[rand_idx]
[ "def", "_binary_sample", "(", "image", ",", "label", ",", "n_samples_per_label", ",", "label_count", ")", ":", "h_idx", ",", "w_idx", "=", "np", ".", "where", "(", "image", "==", "label", ")", "rand_idx", "=", "np", ".", "random", ".", "choice", "(", "h_idx", ".", "size", ",", "size", "=", "n_samples_per_label", ",", "replace", "=", "label_count", "<", "n_samples_per_label", ")", "return", "h_idx", "[", "rand_idx", "]", ",", "w_idx", "[", "rand_idx", "]" ]
50.904762
27.238095
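Runnable illustration of the sampling above: pick pixel coordinates carrying a given label, sampling with replacement only when the mask holds fewer pixels than requested:

import numpy as np

image = np.zeros((4, 4), dtype=np.uint8)
image[1:3, 1:3] = 7  # a small square of label 7

label, n_samples = 7, 6
h_idx, w_idx = np.where(image == label)
label_count = h_idx.size  # 4 available pixels here
rand_idx = np.random.choice(h_idx.size, size=n_samples,
                            replace=label_count < n_samples)
print(list(zip(h_idx[rand_idx], w_idx[rand_idx])))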
def to_json(self, *, include=None): ''' Serializes this model to a JSON representation so it can be sent via an HTTP REST API ''' json = dict() if include is None or 'id' in include or '*' in include: json['id'] = self.id if include is None or '_type' in include or '*' in include: json['_type'] = type(self).cls_key() def fieldfilter(fieldtuple): return \ not fieldtuple[1].private and \ not isinstance(fieldtuple[1], Relation) and ( include is None or fieldtuple[0] in include or '*' in include ) json.update(dict(starmap( lambda fn, f: (fn, f.to_json(getattr(self, fn))), filter( fieldfilter, self.proxy ) ))) for requested_relation in parse_embed(include): relation_name, subfields = requested_relation if not hasattr(self.proxy, relation_name): continue relation = getattr(self.proxy, relation_name) if isinstance(relation, ForeignIdRelation): item = relation.get() if item is not None: json[relation_name] = item.to_json(include=subfields) else: json[relation_name] = None elif isinstance(relation, MultipleRelation): json[relation_name] = list(map(lambda o: o.to_json(include=subfields), relation.get())) return json
[ "def", "to_json", "(", "self", ",", "*", ",", "include", "=", "None", ")", ":", "json", "=", "dict", "(", ")", "if", "include", "is", "None", "or", "'id'", "in", "include", "or", "'*'", "in", "include", ":", "json", "[", "'id'", "]", "=", "self", ".", "id", "if", "include", "is", "None", "or", "'_type'", "in", "include", "or", "'*'", "in", "include", ":", "json", "[", "'_type'", "]", "=", "type", "(", "self", ")", ".", "cls_key", "(", ")", "def", "fieldfilter", "(", "fieldtuple", ")", ":", "return", "not", "fieldtuple", "[", "1", "]", ".", "private", "and", "not", "isinstance", "(", "fieldtuple", "[", "1", "]", ",", "Relation", ")", "and", "(", "include", "is", "None", "or", "fieldtuple", "[", "0", "]", "in", "include", "or", "'*'", "in", "include", ")", "json", ".", "update", "(", "dict", "(", "starmap", "(", "lambda", "fn", ",", "f", ":", "(", "fn", ",", "f", ".", "to_json", "(", "getattr", "(", "self", ",", "fn", ")", ")", ")", ",", "filter", "(", "fieldfilter", ",", "self", ".", "proxy", ")", ")", ")", ")", "for", "requested_relation", "in", "parse_embed", "(", "include", ")", ":", "relation_name", ",", "subfields", "=", "requested_relation", "if", "not", "hasattr", "(", "self", ".", "proxy", ",", "relation_name", ")", ":", "continue", "relation", "=", "getattr", "(", "self", ".", "proxy", ",", "relation_name", ")", "if", "isinstance", "(", "relation", ",", "ForeignIdRelation", ")", ":", "item", "=", "relation", ".", "get", "(", ")", "if", "item", "is", "not", "None", ":", "json", "[", "relation_name", "]", "=", "item", ".", "to_json", "(", "include", "=", "subfields", ")", "else", ":", "json", "[", "relation_name", "]", "=", "None", "elif", "isinstance", "(", "relation", ",", "MultipleRelation", ")", ":", "json", "[", "relation_name", "]", "=", "list", "(", "map", "(", "lambda", "o", ":", "o", ".", "to_json", "(", "include", "=", "subfields", ")", ",", "relation", ".", "get", "(", ")", ")", ")", "return", "json" ]
33.8
23.133333
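Minimal sketch of the include-list convention the serializer follows: a field is emitted when include is None (everything), contains '*', or names the field:

def filter_fields(fields, include=None):
    return {name: value for name, value in fields.items()
            if include is None or '*' in include or name in include}

record = {'id': 1, 'title': 'hello', 'secret': 'x'}
print(filter_fields(record, include=['id', 'title']))  # {'id': 1, 'title': 'hello'}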
def get_account_history(self, address, offset=None, count=None): """ Get the history of account transactions over a block range Returns a dict keyed by blocks, which map to lists of account state transitions """ cur = self.db.cursor() return namedb_get_account_history(cur, address, offset=offset, count=count)
[ "def", "get_account_history", "(", "self", ",", "address", ",", "offset", "=", "None", ",", "count", "=", "None", ")", ":", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "return", "namedb_get_account_history", "(", "cur", ",", "address", ",", "offset", "=", "offset", ",", "count", "=", "count", ")" ]
50.285714
21.428571
def get_init_score(self): """Get the initial score of the Dataset. Returns ------- init_score : numpy array or None Init score of Booster. """ if self.init_score is None: self.init_score = self.get_field('init_score') return self.init_score
[ "def", "get_init_score", "(", "self", ")", ":", "if", "self", ".", "init_score", "is", "None", ":", "self", ".", "init_score", "=", "self", ".", "get_field", "(", "'init_score'", ")", "return", "self", ".", "init_score" ]
28.272727
13.090909
def main(): """pyprf_feature entry point.""" # %% Print Welcome message strWelcome = 'pyprf_feature ' + __version__ strDec = '=' * len(strWelcome) print(strDec + '\n' + strWelcome + '\n' + strDec) # %% Get list of input arguments # Create parser object: objParser = argparse.ArgumentParser() # Add argument to namespace - config file path: objParser.add_argument('-config', metavar='config.csv', help='Absolute file path of config file with \ parameters for pRF analysis. Ignored if in \ testing mode.' ) # Add argument to namespace -mdl_rsp flag: objParser.add_argument('-strPathHrf', default=None, required=False, metavar='/path/to/custom_hrf_parameter.npy', help='Path to npy file with custom hrf parameters. \ Ignored if in testing mode.') objParser.add_argument('-supsur', nargs='+', help='List of floats that represent the ratio of \ size neg surround to size pos center.', type=float, default=None) # Add argument to namespace -save_tc flag: objParser.add_argument('-save_tc', dest='save_tc', action='store_true', default=False, help='Save fitted and empirical time courses to \ nifti file. Ignored if in testing mode.') # Add argument to namespace -mdl_rsp flag: objParser.add_argument('-mdl_rsp', dest='lgcMdlRsp', action='store_true', default=False, help='When saving fitted and empirical time \ courses, should fitted aperture responses be \ saved as well? Ignored if in testing mode.') # Namespace object containign arguments and values: objNspc = objParser.parse_args() # Get path of config file from argument parser: strCsvCnfg = objNspc.config # %% Decide which action to perform # If no config argument is provided, print info to user. if strCsvCnfg is None: print('Please provide the file path to a config file, e.g.:') print(' pyprf_feature -config /path/to/my_config_file.csv') # If config file is provided, either perform fitting or recreate fitted # and empirical time courses depending on whether save_tc is True or False else: # Signal non-test mode to lower functions (needed for pytest): lgcTest = False # If save_tc true, save fitted and empirical time courses to nifti file # This assumes that fitting has already been run and will throw an # error if the resulting nii files of the fitting cannot be found. if objNspc.save_tc: print('***Mode: Save fitted and empirical time courses***') if objNspc.lgcMdlRsp: print(' ***Also save fitted aperture responses***') # Call to function save_tc_to_nii(strCsvCnfg, lgcTest=lgcTest, lstRat=objNspc.supsur, lgcMdlRsp=objNspc.lgcMdlRsp, strPathHrf=objNspc.strPathHrf) # If save_tc false, perform pRF fitting, either with or without # suppressive surround else: # Perform pRF fitting without suppressive surround if objNspc.supsur is None: print('***Mode: Fit pRF models, no suppressive surround***') # Call to main function, to invoke pRF fitting: pyprf(strCsvCnfg, lgcTest, varRat=None, strPathHrf=objNspc.strPathHrf) # Perform pRF fitting with suppressive surround else: print('***Mode: Fit pRF models, suppressive surround***') # Load config parameters from csv file into dictionary: dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest, lgcPrint=False) # Load config parameters from dictionary into namespace. # We do this on every loop so we have a fresh start in case # variables are redefined during the prf analysis cfg = cls_set_config(dicCnfg) # Make sure that lgcCrteMdl is set to True since we will need # to loop iteratively over pyprf_feature with different ratios # for size surround to size center. 
On every loop models, # reflecting the new ratio, need to be created from scratch errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.' assert cfg.lgcCrteMdl, errorMsg # Make sure that switchHrf is set to 1. It would not make sense # to find the negative surround for the hrf deriavtive function errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.' assert cfg.switchHrfSet == 1, errorMsg # Get list with size ratios lstRat = objNspc.supsur # Make sure that all ratios are larger than 1.0 errorMsg = 'All provided ratios need to be larger than 1.0' assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg # Append None as the first entry, so fitting without surround # is performed once as well lstRat.insert(0, None) # Loop over ratios and find best pRF for varRat in lstRat: # Print to command line, so the user knows which exponent # is used print('---Ratio surround to center: ' + str(varRat)) # Call to main function, to invoke pRF analysis: pyprf(strCsvCnfg, lgcTest=lgcTest, varRat=varRat, strPathHrf=objNspc.strPathHrf) # List with name suffices of output images: lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle', '_eccentricity', '_Betas'] # Compare results for the different ratios, export nii files # based on the results of the comparison and delete in-between # results # Replace first entry (None) with 1, so it can be saved to nii lstRat[0] = 1.0 # Append 'hrf' to cfg.strPathOut, if fitting was done with # custom hrf if objNspc.strPathHrf is not None: cfg.strPathOut = cfg.strPathOut + '_hrf' cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl, lgcDel=True)
[ "def", "main", "(", ")", ":", "# %% Print Welcome message", "strWelcome", "=", "'pyprf_feature '", "+", "__version__", "strDec", "=", "'='", "*", "len", "(", "strWelcome", ")", "print", "(", "strDec", "+", "'\\n'", "+", "strWelcome", "+", "'\\n'", "+", "strDec", ")", "# %% Get list of input arguments", "# Create parser object:", "objParser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# Add argument to namespace - config file path:", "objParser", ".", "add_argument", "(", "'-config'", ",", "metavar", "=", "'config.csv'", ",", "help", "=", "'Absolute file path of config file with \\\n parameters for pRF analysis. Ignored if in \\\n testing mode.'", ")", "# Add argument to namespace -mdl_rsp flag:", "objParser", ".", "add_argument", "(", "'-strPathHrf'", ",", "default", "=", "None", ",", "required", "=", "False", ",", "metavar", "=", "'/path/to/custom_hrf_parameter.npy'", ",", "help", "=", "'Path to npy file with custom hrf parameters. \\\n Ignored if in testing mode.'", ")", "objParser", ".", "add_argument", "(", "'-supsur'", ",", "nargs", "=", "'+'", ",", "help", "=", "'List of floats that represent the ratio of \\\n size neg surround to size pos center.'", ",", "type", "=", "float", ",", "default", "=", "None", ")", "# Add argument to namespace -save_tc flag:", "objParser", ".", "add_argument", "(", "'-save_tc'", ",", "dest", "=", "'save_tc'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Save fitted and empirical time courses to \\\n nifti file. Ignored if in testing mode.'", ")", "# Add argument to namespace -mdl_rsp flag:", "objParser", ".", "add_argument", "(", "'-mdl_rsp'", ",", "dest", "=", "'lgcMdlRsp'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'When saving fitted and empirical time \\\n courses, should fitted aperture responses be \\\n saved as well? 
Ignored if in testing mode.'", ")", "# Namespace object containign arguments and values:", "objNspc", "=", "objParser", ".", "parse_args", "(", ")", "# Get path of config file from argument parser:", "strCsvCnfg", "=", "objNspc", ".", "config", "# %% Decide which action to perform", "# If no config argument is provided, print info to user.", "if", "strCsvCnfg", "is", "None", ":", "print", "(", "'Please provide the file path to a config file, e.g.:'", ")", "print", "(", "' pyprf_feature -config /path/to/my_config_file.csv'", ")", "# If config file is provided, either perform fitting or recreate fitted", "# and empirical time courses depending on whether save_tc is True or False", "else", ":", "# Signal non-test mode to lower functions (needed for pytest):", "lgcTest", "=", "False", "# If save_tc true, save fitted and empirical time courses to nifti file", "# This assumes that fitting has already been run and will throw an", "# error if the resulting nii files of the fitting cannot be found.", "if", "objNspc", ".", "save_tc", ":", "print", "(", "'***Mode: Save fitted and empirical time courses***'", ")", "if", "objNspc", ".", "lgcMdlRsp", ":", "print", "(", "' ***Also save fitted aperture responses***'", ")", "# Call to function", "save_tc_to_nii", "(", "strCsvCnfg", ",", "lgcTest", "=", "lgcTest", ",", "lstRat", "=", "objNspc", ".", "supsur", ",", "lgcMdlRsp", "=", "objNspc", ".", "lgcMdlRsp", ",", "strPathHrf", "=", "objNspc", ".", "strPathHrf", ")", "# If save_tc false, perform pRF fitting, either with or without", "# suppressive surround", "else", ":", "# Perform pRF fitting without suppressive surround", "if", "objNspc", ".", "supsur", "is", "None", ":", "print", "(", "'***Mode: Fit pRF models, no suppressive surround***'", ")", "# Call to main function, to invoke pRF fitting:", "pyprf", "(", "strCsvCnfg", ",", "lgcTest", ",", "varRat", "=", "None", ",", "strPathHrf", "=", "objNspc", ".", "strPathHrf", ")", "# Perform pRF fitting with suppressive surround", "else", ":", "print", "(", "'***Mode: Fit pRF models, suppressive surround***'", ")", "# Load config parameters from csv file into dictionary:", "dicCnfg", "=", "load_config", "(", "strCsvCnfg", ",", "lgcTest", "=", "lgcTest", ",", "lgcPrint", "=", "False", ")", "# Load config parameters from dictionary into namespace.", "# We do this on every loop so we have a fresh start in case", "# variables are redefined during the prf analysis", "cfg", "=", "cls_set_config", "(", "dicCnfg", ")", "# Make sure that lgcCrteMdl is set to True since we will need", "# to loop iteratively over pyprf_feature with different ratios", "# for size surround to size center. On every loop models,", "# reflecting the new ratio, need to be created from scratch", "errorMsg", "=", "'lgcCrteMdl needs to be set to True for -supsur.'", "assert", "cfg", ".", "lgcCrteMdl", ",", "errorMsg", "# Make sure that switchHrf is set to 1. 
It would not make sense", "# to find the negative surround for the hrf deriavtive function", "errorMsg", "=", "'switchHrfSet needs to be set to 1 for -supsur.'", "assert", "cfg", ".", "switchHrfSet", "==", "1", ",", "errorMsg", "# Get list with size ratios", "lstRat", "=", "objNspc", ".", "supsur", "# Make sure that all ratios are larger than 1.0", "errorMsg", "=", "'All provided ratios need to be larger than 1.0'", "assert", "np", ".", "all", "(", "np", ".", "greater", "(", "np", ".", "array", "(", "lstRat", ")", ",", "1.0", ")", ")", ",", "errorMsg", "# Append None as the first entry, so fitting without surround", "# is performed once as well", "lstRat", ".", "insert", "(", "0", ",", "None", ")", "# Loop over ratios and find best pRF", "for", "varRat", "in", "lstRat", ":", "# Print to command line, so the user knows which exponent", "# is used", "print", "(", "'---Ratio surround to center: '", "+", "str", "(", "varRat", ")", ")", "# Call to main function, to invoke pRF analysis:", "pyprf", "(", "strCsvCnfg", ",", "lgcTest", "=", "lgcTest", ",", "varRat", "=", "varRat", ",", "strPathHrf", "=", "objNspc", ".", "strPathHrf", ")", "# List with name suffices of output images:", "lstNiiNames", "=", "[", "'_x_pos'", ",", "'_y_pos'", ",", "'_SD'", ",", "'_R2'", ",", "'_polar_angle'", ",", "'_eccentricity'", ",", "'_Betas'", "]", "# Compare results for the different ratios, export nii files", "# based on the results of the comparison and delete in-between", "# results", "# Replace first entry (None) with 1, so it can be saved to nii", "lstRat", "[", "0", "]", "=", "1.0", "# Append 'hrf' to cfg.strPathOut, if fitting was done with", "# custom hrf", "if", "objNspc", ".", "strPathHrf", "is", "not", "None", ":", "cfg", ".", "strPathOut", "=", "cfg", ".", "strPathOut", "+", "'_hrf'", "cmp_res_R2", "(", "lstRat", ",", "lstNiiNames", ",", "cfg", ".", "strPathOut", ",", "cfg", ".", "strPathMdl", ",", "lgcDel", "=", "True", ")" ]
43.898734
24.493671
def evaluateplanarPotentials(Pot,R,phi=None,t=0.,dR=0,dphi=0): """ NAME: evaluateplanarPotentials PURPOSE: evaluate a (list of) planarPotential instance(s) INPUT: Pot - (list of) planarPotential instance(s) R - Cylindrical radius (can be Quantity) phi= azimuth (optional; can be Quantity) t= time (optional; can be Quantity) dR=, dphi= if set to non-zero integers, return the (dR, dphi)-th derivative instead OUTPUT: Phi(R(,phi,t)) HISTORY: 2010-07-13 - Written - Bovy (NYU) """ return _evaluateplanarPotentials(Pot,R,phi=phi,t=t,dR=dR,dphi=dphi)
[ "def", "evaluateplanarPotentials", "(", "Pot", ",", "R", ",", "phi", "=", "None", ",", "t", "=", "0.", ",", "dR", "=", "0", ",", "dphi", "=", "0", ")", ":", "return", "_evaluateplanarPotentials", "(", "Pot", ",", "R", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "dR", "=", "dR", ",", "dphi", "=", "dphi", ")" ]
19.375
27.3125
def run(self, file, updateconfig=True, clean=False, path=None): """ Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates. """ if updateconfig: self.update_config() # Try to find SExtractor program # This will raise an exception if it failed self.program, self.version = self.setup(path) commandline = ( self.program + " -c " + self.config['CONFIG_FILE'] + " " + file) # print commandline rcode = os.system(commandline) if (rcode): raise SExtractorException( "SExtractor command [%s] failed." % commandline ) if clean: self.clean()
[ "def", "run", "(", "self", ",", "file", ",", "updateconfig", "=", "True", ",", "clean", "=", "False", ",", "path", "=", "None", ")", ":", "if", "updateconfig", ":", "self", ".", "update_config", "(", ")", "# Try to find SExtractor program", "# This will raise an exception if it failed", "self", ".", "program", ",", "self", ".", "version", "=", "self", ".", "setup", "(", "path", ")", "commandline", "=", "(", "self", ".", "program", "+", "\" -c \"", "+", "self", ".", "config", "[", "'CONFIG_FILE'", "]", "+", "\" \"", "+", "file", ")", "# print commandline", "rcode", "=", "os", ".", "system", "(", "commandline", ")", "if", "(", "rcode", ")", ":", "raise", "SExtractorException", "(", "\"SExtractor command [%s] failed.\"", "%", "commandline", ")", "if", "clean", ":", "self", ".", "clean", "(", ")" ]
26.818182
22.636364
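The same build-command-then-check-the-return-code pattern with subprocess instead of os.system; the 'sex' binary and config filename are placeholders, so the call is left commented:

import subprocess

def run_command(program, config_file, target):
    commandline = [program, '-c', config_file, target]
    rcode = subprocess.call(commandline)
    if rcode:
        raise RuntimeError('command %r failed' % ' '.join(commandline))

# run_command('sex', 'default.sex', 'image.fits')  # needs SExtractor installed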
def _callbackPlaceFillOrders(self, d): """ This method distinguishes notifications caused by Matched orders from those caused by placed orders """ if isinstance(d, FilledOrder): self.onOrderMatched(d) elif isinstance(d, Order): self.onOrderPlaced(d) elif isinstance(d, UpdateCallOrder): self.onUpdateCallOrder(d) else: pass
[ "def", "_callbackPlaceFillOrders", "(", "self", ",", "d", ")", ":", "if", "isinstance", "(", "d", ",", "FilledOrder", ")", ":", "self", ".", "onOrderMatched", "(", "d", ")", "elif", "isinstance", "(", "d", ",", "Order", ")", ":", "self", ".", "onOrderPlaced", "(", "d", ")", "elif", "isinstance", "(", "d", ",", "UpdateCallOrder", ")", ":", "self", ".", "onUpdateCallOrder", "(", "d", ")", "else", ":", "pass" ]
35.083333
7.25
def _dequeue_update(self, change): """ Only update when all changes are done """ self._update_count -= 1 if self._update_count != 0: return self.update_shape(change)
[ "def", "_dequeue_update", "(", "self", ",", "change", ")", ":", "self", ".", "_update_count", "-=", "1", "if", "self", ".", "_update_count", "!=", "0", ":", "return", "self", ".", "update_shape", "(", "change", ")" ]
34.333333
8
def find_checks(argument_name): """ Find all globally visible functions where the first argument name starts with argument_name. """ checks = [] function_type = type(find_checks) for name, function in globals().iteritems(): if type(function) is function_type: args = inspect.getargspec(function)[0] if len(args) >= 1 and args[0].startswith(argument_name): checks.append((name, function, args)) checks.sort() return checks
[ "def", "find_checks", "(", "argument_name", ")", ":", "checks", "=", "[", "]", "function_type", "=", "type", "(", "find_checks", ")", "for", "name", ",", "function", "in", "globals", "(", ")", ".", "iteritems", "(", ")", ":", "if", "type", "(", "function", ")", "is", "function_type", ":", "args", "=", "inspect", ".", "getargspec", "(", "function", ")", "[", "0", "]", "if", "len", "(", "args", ")", ">=", "1", "and", "args", "[", "0", "]", ".", "startswith", "(", "argument_name", ")", ":", "checks", ".", "append", "(", "(", "name", ",", "function", ",", "args", ")", ")", "checks", ".", "sort", "(", ")", "return", "checks" ]
35.142857
13.142857
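A Python 3 counterpart of the discovery above (globals().iteritems() and inspect.getargspec are Python 2 era); inspect.signature takes getargspec's place:

import inspect

def logical_check(logical_line):  # would be discovered
    pass

def other_helper(path):  # would be skipped
    pass

def find_checks(argument_name, namespace):
    checks = []
    for name, function in namespace.items():
        if inspect.isfunction(function):
            args = list(inspect.signature(function).parameters)
            if args and args[0].startswith(argument_name):
                checks.append((name, function, args))
    return sorted(checks, key=lambda c: c[0])

print([name for name, _, _ in find_checks('logical', dict(globals()))])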
def _echo_byte(self, byte): """ Echo a character back to the client and convert LF into CR/LF. """ if byte == '\n': self.send_buffer += '\r' if self.telnet_echo_password: self.send_buffer += '*' else: self.send_buffer += byte
[ "def", "_echo_byte", "(", "self", ",", "byte", ")", ":", "if", "byte", "==", "'\\n'", ":", "self", ".", "send_buffer", "+=", "'\\r'", "if", "self", ".", "telnet_echo_password", ":", "self", ".", "send_buffer", "+=", "'*'", "else", ":", "self", ".", "send_buffer", "+=", "byte" ]
30
10.2
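Self-contained sketch of the echo behavior: LF becomes CR followed by LF, and input is masked with '*' while a password is being typed:

def echo_bytes(text, echo_password=False):
    out = ''
    for ch in text:
        if ch == '\n':
            out += '\r'
        out += '*' if echo_password else ch
    return out

print(repr(echo_bytes('hi\n')))                      # 'hi\r\n'
print(repr(echo_bytes('pw\n', echo_password=True)))  # '**\r*'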
def modify(self, view): """ adds the get item as extra context """ view.params['extra_context'][self.get['name']] = self.get['value'] return view
[ "def", "modify", "(", "self", ",", "view", ")", ":", "view", ".", "params", "[", "'extra_context'", "]", "[", "self", ".", "get", "[", "'name'", "]", "]", "=", "self", ".", "get", "[", "'value'", "]", "return", "view" ]
30
12.333333
def delete_relationship(cls, id, related_collection_name, related_resource=None): """ Deprecated for version 1.1.0. Please use update_relationship """ try: this_resource = cls.nodes.get(id=id, active=True) if not related_resource: r = this_resource.delete_relationship_collection(related_collection_name) else: r = this_resource.delete_individual_relationship(related_collection_name, related_resource) except DoesNotExist: r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND]) return r
[ "def", "delete_relationship", "(", "cls", ",", "id", ",", "related_collection_name", ",", "related_resource", "=", "None", ")", ":", "try", ":", "this_resource", "=", "cls", ".", "nodes", ".", "get", "(", "id", "=", "id", ",", "active", "=", "True", ")", "if", "not", "related_resource", ":", "r", "=", "this_resource", ".", "delete_relationship_collection", "(", "related_collection_name", ")", "else", ":", "r", "=", "this_resource", ".", "delete_individual_relationship", "(", "related_collection_name", ",", "related_resource", ")", "except", "DoesNotExist", ":", "r", "=", "application_codes", ".", "error_response", "(", "[", "application_codes", ".", "RESOURCE_NOT_FOUND", "]", ")", "return", "r" ]
48.153846
26.615385
def simxTransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if (sys.version_info[0] == 3) and (type(filePathAndName) is str): filePathAndName=filePathAndName.encode('utf-8') return c_TransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode)
[ "def", "simxTransferFile", "(", "clientID", ",", "filePathAndName", ",", "fileName_serverSide", ",", "timeOut", ",", "operationMode", ")", ":", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "filePathAndName", ")", "is", "str", ")", ":", "filePathAndName", "=", "filePathAndName", ".", "encode", "(", "'utf-8'", ")", "return", "c_TransferFile", "(", "clientID", ",", "filePathAndName", ",", "fileName_serverSide", ",", "timeOut", ",", "operationMode", ")" ]
52.125
38.625
def mean(a, rep=0.75, **kwargs): """Compute the average along a 1D array like ma.mean, but with a representativity coefficient: if ma.count(a)/ma.size(a) >= rep, then the result is a masked value """ return rfunc(a, ma.mean, rep, **kwargs)
[ "def", "mean", "(", "a", ",", "rep", "=", "0.75", ",", "*", "*", "kwargs", ")", ":", "return", "rfunc", "(", "a", ",", "ma", ".", "mean", ",", "rep", ",", "*", "*", "kwargs", ")" ]
42.166667
8.5
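rfunc itself is not shown in this record, so the gate below is an assumption about the intended convention (mask the result when too few values are valid, which is the opposite reading of the docstring's wording):

import numpy.ma as ma

def guarded_mean(a, rep=0.75):
    a = ma.asarray(a)
    if ma.count(a) / a.size < rep:
        return ma.masked  # not representative enough
    return a.mean()

x = ma.array([1.0, 2.0, 3.0, 4.0], mask=[False, False, True, True])
print(guarded_mean(x))  # masked: only 50% of the values are valid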
def update_aggregation(self, course, aggregationid, new_data): """ Update aggregation and returns a list of errored students""" student_list = self.user_manager.get_course_registered_users(course, False) # If aggregation is new if aggregationid == 'None': # Remove _id for correct insertion del new_data['_id'] new_data["courseid"] = course.get_id() # Insert the new aggregation result = self.database.aggregations.insert_one(new_data) # Retrieve new aggregation id aggregationid = result.inserted_id new_data['_id'] = result.inserted_id aggregation = new_data else: aggregation = self.database.aggregations.find_one({"_id": ObjectId(aggregationid), "courseid": course.get_id()}) # Check tutors new_data["tutors"] = [tutor for tutor in new_data["tutors"] if tutor in course.get_staff()] students, groups, errored_students = [], [], [] # Check the students for student in new_data["students"]: if student in student_list: # Remove user from the other aggregation self.database.aggregations.find_one_and_update({"courseid": course.get_id(), "groups.students": student}, {"$pull": {"groups.$.students": student, "students": student}}) self.database.aggregations.find_one_and_update({"courseid": course.get_id(), "students": student}, {"$pull": {"students": student}}) students.append(student) else: # Check if user can be registered user_info = self.user_manager.get_user_info(student) if user_info is None or student in aggregation["tutors"]: errored_students.append(student) else: students.append(student) removed_students = [student for student in aggregation["students"] if student not in new_data["students"]] self.database.aggregations.find_one_and_update({"courseid": course.get_id(), "default": True}, {"$push": {"students": {"$each": removed_students}}}) new_data["students"] = students # Check the groups for group in new_data["groups"]: group["students"] = [student for student in group["students"] if student in new_data["students"]] if len(group["students"]) <= group["size"]: groups.append(group) new_data["groups"] = groups # Check for default aggregation if new_data['default']: self.database.aggregations.find_one_and_update({"courseid": course.get_id(), "default": True}, {"$set": {"default": False}}) aggregation = self.database.aggregations.find_one_and_update( {"_id": ObjectId(aggregationid)}, {"$set": {"description": new_data["description"], "students": students, "tutors": new_data["tutors"], "groups": groups, "default": new_data['default']}}, return_document=ReturnDocument.AFTER) return aggregation, errored_students
[ "def", "update_aggregation", "(", "self", ",", "course", ",", "aggregationid", ",", "new_data", ")", ":", "student_list", "=", "self", ".", "user_manager", ".", "get_course_registered_users", "(", "course", ",", "False", ")", "# If aggregation is new", "if", "aggregationid", "==", "'None'", ":", "# Remove _id for correct insertion", "del", "new_data", "[", "'_id'", "]", "new_data", "[", "\"courseid\"", "]", "=", "course", ".", "get_id", "(", ")", "# Insert the new aggregation", "result", "=", "self", ".", "database", ".", "aggregations", ".", "insert_one", "(", "new_data", ")", "# Retrieve new aggregation id", "aggregationid", "=", "result", ".", "inserted_id", "new_data", "[", "'_id'", "]", "=", "result", ".", "inserted_id", "aggregation", "=", "new_data", "else", ":", "aggregation", "=", "self", ".", "database", ".", "aggregations", ".", "find_one", "(", "{", "\"_id\"", ":", "ObjectId", "(", "aggregationid", ")", ",", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", "}", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "new_data", "[", "\"tutors\"", "]", "if", "tutor", "in", "course", ".", "get_staff", "(", ")", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "# Remove user from the other aggregation", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "aggregation", "[", "\"tutors\"", "]", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", "aggregation", "[", "\"students\"", "]", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "group", "[", "\"students\"", "]", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "# Check for default aggregation", "if", "new_data", "[", "'default'", "]", ":", "self", ".", "database", 
".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$set\"", ":", "{", "\"default\"", ":", "False", "}", "}", ")", "aggregation", "=", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "aggregationid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", ":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", ",", "\"default\"", ":", "new_data", "[", "'default'", "]", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "aggregation", ",", "errored_students" ]
47.808824
28.602941
def lookup_model_by_kind(kind): """Look up the model instance for a given Datastore kind. Parameters: kind(str) Raises: RuntimeError: If a model for the given kind has not been defined. Returns: model: The model class. """ model = _known_models.get(kind) if model is None: raise RuntimeError(f"Model for kind {kind!r} not found.") return model
[ "def", "lookup_model_by_kind", "(", "kind", ")", ":", "model", "=", "_known_models", ".", "get", "(", "kind", ")", "if", "model", "is", "None", ":", "raise", "RuntimeError", "(", "f\"Model for kind {kind!r} not found.\"", ")", "return", "model" ]
23.294118
21.529412
def evaluate_tour_M(self, tour): """ Use Cythonized version to evaluate the score of a current tour """ from .chic import score_evaluate_M return score_evaluate_M(tour, self.active_sizes, self.M)
[ "def", "evaluate_tour_M", "(", "self", ",", "tour", ")", ":", "from", ".", "chic", "import", "score_evaluate_M", "return", "score_evaluate_M", "(", "tour", ",", "self", ".", "active_sizes", ",", "self", ".", "M", ")" ]
44.6
6.8
def is_module_stdlib(file_name): """Returns True if the file_name is in the lib directory.""" # TODO: Move these calls away from this function so it doesn't have to run # every time. lib_path = sysconfig.get_python_lib() path = os.path.split(lib_path) if path[1] == 'site-packages': lib_path = path[0] return file_name.lower().startswith(lib_path.lower())
[ "def", "is_module_stdlib", "(", "file_name", ")", ":", "# TODO: Move these calls away from this function so it doesn't have to run", "# every time.", "lib_path", "=", "sysconfig", ".", "get_python_lib", "(", ")", "path", "=", "os", ".", "path", ".", "split", "(", "lib_path", ")", "if", "path", "[", "1", "]", "==", "'site-packages'", ":", "lib_path", "=", "path", "[", "0", "]", "return", "file_name", ".", "lower", "(", ")", ".", "startswith", "(", "lib_path", ".", "lower", "(", ")", ")" ]
42.555556
12.555556
def add(self, filename, raw_data=None, dx=None): """ Generic method to add a file to the session. This is the main method to use when adding files to a Session! If an APK file is supplied, all DEX files are analyzed too. For DEX and ODEX files, only this file is analyzed (there is nothing else to analyze). Returns the SHA256 of the analyzed file. :param filename: filename to load :param raw_data: bytes of the file, or None to load the file from filename :param dx: An already existing :class:`~androguard.core.analysis.analysis.Analysis` object :return: the sha256 of the file or None on failure """ if not raw_data: log.debug("Loading file from '{}'".format(filename)) with open(filename, "rb") as fp: raw_data = fp.read() ret = androconf.is_android_raw(raw_data) log.debug("Found filetype: '{}'".format(ret)) if not ret: return None if ret == "APK": digest, _ = self.addAPK(filename, raw_data) elif ret == "DEX": digest, _, _ = self.addDEX(filename, raw_data, dx) elif ret == "DEY": digest, _, _ = self.addDEY(filename, raw_data, dx) else: return None return digest
[ "def", "add", "(", "self", ",", "filename", ",", "raw_data", "=", "None", ",", "dx", "=", "None", ")", ":", "if", "not", "raw_data", ":", "log", ".", "debug", "(", "\"Loading file from '{}'\"", ".", "format", "(", "filename", ")", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "fp", ":", "raw_data", "=", "fp", ".", "read", "(", ")", "ret", "=", "androconf", ".", "is_android_raw", "(", "raw_data", ")", "log", ".", "debug", "(", "\"Found filetype: '{}'\"", ".", "format", "(", "ret", ")", ")", "if", "not", "ret", ":", "return", "None", "if", "ret", "==", "\"APK\"", ":", "digest", ",", "_", "=", "self", ".", "addAPK", "(", "filename", ",", "raw_data", ")", "elif", "ret", "==", "\"DEX\"", ":", "digest", ",", "_", ",", "_", "=", "self", ".", "addDEX", "(", "filename", ",", "raw_data", ",", "dx", ")", "elif", "ret", "==", "\"DEY\"", ":", "digest", ",", "_", ",", "_", "=", "self", ".", "addDEY", "(", "filename", ",", "raw_data", ",", "dx", ")", "else", ":", "return", "None", "return", "digest" ]
35.27027
22.081081
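A minimal usage sketch for the dispatcher above. The file path is hypothetical and `session` stands for an already-initialized Session instance:

digest = session.add("app.apk")  # filetype is detected; for an APK, all contained DEX files are analyzed
if digest is None:
    print("unsupported or unreadable file")
else:
    print("analyzed, sha256 =", digest)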
def rate_limited(max_per_second): """ This decorator limits how often a function can get called in a second. If the limit is exceeded, the call will be held in a queue until enough time has passed. Useful when trying to avoid overloading a system with rapid calls. """ min_interval = 1.0 / float(max_per_second) def decorate(func): last_time_called = [0.0] rate_lock = threading.Lock() # To support multi-threading def rate_limited_function(*args, **kargs): try: rate_lock.acquire(True) elapsed = time.monotonic() - last_time_called[0] wait_time_remaining = min_interval - elapsed if wait_time_remaining > 0: time.sleep(wait_time_remaining) last_time_called[0] = time.monotonic() finally: rate_lock.release() return func(*args, **kargs) return rate_limited_function return decorate
[ "def", "rate_limited", "(", "max_per_second", ")", ":", "min_interval", "=", "1.0", "/", "float", "(", "max_per_second", ")", "def", "decorate", "(", "func", ")", ":", "last_time_called", "=", "[", "0.0", "]", "rate_lock", "=", "threading", ".", "Lock", "(", ")", "# To support multi-threading", "def", "rate_limited_function", "(", "*", "args", ",", "*", "*", "kargs", ")", ":", "try", ":", "rate_lock", ".", "acquire", "(", "True", ")", "elapsed", "=", "time", ".", "clock", "(", ")", "-", "last_time_called", "[", "0", "]", "wait_time_remaining", "=", "min_interval", "-", "elapsed", "if", "wait_time_remaining", ">", "0", ":", "time", ".", "sleep", "(", "wait_time_remaining", ")", "last_time_called", "[", "0", "]", "=", "time", ".", "clock", "(", ")", "finally", ":", "rate_lock", ".", "release", "(", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kargs", ")", "return", "rate_limited_function", "return", "decorate" ]
40.625
13.958333
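A short usage sketch for the decorator above, assuming it is defined in a module that imports threading and time (with the time.monotonic() fix applied; time.clock() was removed in Python 3.8):

import time

@rate_limited(2)  # allow at most 2 calls per second
def ping(i):
    print("call", i, "at", time.monotonic())

for i in range(5):
    ping(i)  # from the second call on, each call is delayed so calls are at least 0.5 s apart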
def restore_db(release=None): """ Restore a database backup for the given release; uses the current release by default. """ if not release: release = paths.get_current_release_name() if not release: raise Exception("Release %s was not found" % release) backup_file = "postgresql/%s.sql.gz" % release backup_path = paths.get_backup_path(backup_file) if not env.exists(backup_path): raise Exception("Backup file %s not found" % backup_path) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, backup_path) )
[ "def", "restore_db", "(", "release", "=", "None", ")", ":", "if", "not", "release", ":", "release", "=", "paths", ".", "get_current_release_name", "(", ")", "if", "not", "release", ":", "raise", "Exception", "(", "\"Release %s was not found\"", "%", "release", ")", "backup_file", "=", "\"postgresql/%s.sql.gz\"", "%", "release", "backup_path", "=", "paths", ".", "get_backup_path", "(", "backup_file", ")", "if", "not", "env", ".", "exists", "(", "backup_path", ")", ":", "raise", "Exception", "(", "\"Backup file %s not found\"", "%", "backup_path", ")", "with", "context_managers", ".", "shell_env", "(", "PGPASSWORD", "=", "env", ".", "psql_password", ")", ":", "env", ".", "run", "(", "\"pg_restore --clean -h localhost -d %s -U %s '%s'\"", "%", "(", "env", ".", "psql_db", ",", "env", ".", "psql_user", ",", "backup_path", ")", ")" ]
29.652174
21.652174
def scatter(self, *args, **kwargs): ''' Creates a scatter plot of the given x and y items. Args: x (str or seq[float]) : values or field names of center x coordinates y (str or seq[float]) : values or field names of center y coordinates size (str or list[float]) : values or field names of sizes in screen units marker (str, or list[str]): values or field names of marker types color (color value, optional): shorthand to set both fill and line color source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source. An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created for the user automatically. **kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties` Examples: >>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red") >>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...) .. note:: When passing ``marker="circle"`` it is also possible to supply a ``radius`` value in data-space units. When configuring marker type from a data source column, *all* markers including circles may only be configured with ``size`` in screen units. ''' marker_type = kwargs.pop("marker", "circle") if isinstance(marker_type, string_types) and marker_type in _MARKER_SHORTCUTS: marker_type = _MARKER_SHORTCUTS[marker_type] # The original scatter implementation allowed circle scatters to set a # radius. We will leave this here for compatibility but note that it # only works when the marker type is "circle" (and not referencing a # data source column). Consider deprecating in the future. if marker_type == "circle" and "radius" in kwargs: return self.circle(*args, **kwargs) else: return self._scatter(*args, marker=marker_type, **kwargs)
[ "def", "scatter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "marker_type", "=", "kwargs", ".", "pop", "(", "\"marker\"", ",", "\"circle\"", ")", "if", "isinstance", "(", "marker_type", ",", "string_types", ")", "and", "marker_type", "in", "_MARKER_SHORTCUTS", ":", "marker_type", "=", "_MARKER_SHORTCUTS", "[", "marker_type", "]", "# The original scatter implementation allowed circle scatters to set a", "# radius. We will leave this here for compatibility but note that it", "# only works when the marker type is \"circle\" (and not referencing a", "# data source column). Consider deprecating in the future.", "if", "marker_type", "==", "\"circle\"", "and", "\"radius\"", "in", "kwargs", ":", "return", "self", ".", "circle", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "_scatter", "(", "*", "args", ",", "marker", "=", "marker_type", ",", "*", "*", "kwargs", ")" ]
46.622222
35.688889
def load_all_methods(self): r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, and :obj:`all_methods`, a set of methods for which data exists. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. ''' methods = [] Tmins, Tmaxs = [], [] if self.CASRN in WagnerMcGarry.index: methods.append(WAGNER_MCGARRY) _, A, B, C, D, self.WAGNER_MCGARRY_Pc, self.WAGNER_MCGARRY_Tc, self.WAGNER_MCGARRY_Tmin = _WagnerMcGarry_values[WagnerMcGarry.index.get_loc(self.CASRN)].tolist() self.WAGNER_MCGARRY_coefs = [A, B, C, D] Tmins.append(self.WAGNER_MCGARRY_Tmin); Tmaxs.append(self.WAGNER_MCGARRY_Tc) if self.CASRN in WagnerPoling.index: methods.append(WAGNER_POLING) _, A, B, C, D, self.WAGNER_POLING_Tc, self.WAGNER_POLING_Pc, Tmin, self.WAGNER_POLING_Tmax = _WagnerPoling_values[WagnerPoling.index.get_loc(self.CASRN)].tolist() # Some Tmin values are missing; Arbitrary choice of 0.1 lower limit self.WAGNER_POLING_Tmin = Tmin if not np.isnan(Tmin) else self.WAGNER_POLING_Tmax*0.1 self.WAGNER_POLING_coefs = [A, B, C, D] Tmins.append(Tmin); Tmaxs.append(self.WAGNER_POLING_Tmax) if self.CASRN in AntoineExtended.index: methods.append(ANTOINE_EXTENDED_POLING) _, A, B, C, Tc, to, n, E, F, self.ANTOINE_EXTENDED_POLING_Tmin, self.ANTOINE_EXTENDED_POLING_Tmax = _AntoineExtended_values[AntoineExtended.index.get_loc(self.CASRN)].tolist() self.ANTOINE_EXTENDED_POLING_coefs = [Tc, to, A, B, C, n, E, F] Tmins.append(self.ANTOINE_EXTENDED_POLING_Tmin); Tmaxs.append(self.ANTOINE_EXTENDED_POLING_Tmax) if self.CASRN in AntoinePoling.index: methods.append(ANTOINE_POLING) _, A, B, C, self.ANTOINE_POLING_Tmin, self.ANTOINE_POLING_Tmax = _AntoinePoling_values[AntoinePoling.index.get_loc(self.CASRN)].tolist() self.ANTOINE_POLING_coefs = [A, B, C] Tmins.append(self.ANTOINE_POLING_Tmin); Tmaxs.append(self.ANTOINE_POLING_Tmax) if self.CASRN in Perrys2_8.index: methods.append(DIPPR_PERRY_8E) _, C1, C2, C3, C4, C5, self.Perrys2_8_Tmin, self.Perrys2_8_Tmax = _Perrys2_8_values[Perrys2_8.index.get_loc(self.CASRN)].tolist() self.Perrys2_8_coeffs = [C1, C2, C3, C4, C5] Tmins.append(self.Perrys2_8_Tmin); Tmaxs.append(self.Perrys2_8_Tmax) if has_CoolProp and self.CASRN in coolprop_dict: methods.append(COOLPROP) self.CP_f = coolprop_fluids[self.CASRN] Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc) if self.CASRN in _VDISaturationDict: methods.append(VDI_TABULAR) Ts, props = VDI_tabular_data(self.CASRN, 'P') self.VDI_Tmin = Ts[0] self.VDI_Tmax = Ts[-1] self.tabular_data[VDI_TABULAR] = (Ts, props) Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax) if self.CASRN in VDI_PPDS_3.index: _, Tm, Tc, Pc, A, B, C, D = _VDI_PPDS_3_values[VDI_PPDS_3.index.get_loc(self.CASRN)].tolist() self.VDI_PPDS_coeffs = [A, B, C, D] self.VDI_PPDS_Tc = Tc self.VDI_PPDS_Tm = Tm self.VDI_PPDS_Pc = Pc methods.append(VDI_PPDS) Tmins.append(self.VDI_PPDS_Tm); Tmaxs.append(self.VDI_PPDS_Tc) if all((self.Tb, self.Tc, self.Pc)): methods.append(BOILING_CRITICAL) Tmins.append(0.01); Tmaxs.append(self.Tc) if all((self.Tc, self.Pc, self.omega)): methods.append(LEE_KESLER_PSAT) methods.append(AMBROSE_WALTON) methods.append(SANJARI) methods.append(EDALAT) if self.eos: methods.append(EOS) Tmins.append(0.01); Tmaxs.append(self.Tc)
self.all_methods = set(methods) if Tmins and Tmaxs: self.Tmin = min(Tmins) self.Tmax = max(Tmaxs)
[ "def", "load_all_methods", "(", "self", ")", ":", "methods", "=", "[", "]", "Tmins", ",", "Tmaxs", "=", "[", "]", ",", "[", "]", "if", "self", ".", "CASRN", "in", "WagnerMcGarry", ".", "index", ":", "methods", ".", "append", "(", "WAGNER_MCGARRY", ")", "_", ",", "A", ",", "B", ",", "C", ",", "D", ",", "self", ".", "WAGNER_MCGARRY_Pc", ",", "self", ".", "WAGNER_MCGARRY_Tc", ",", "self", ".", "WAGNER_MCGARRY_Tmin", "=", "_WagnerMcGarry_values", "[", "WagnerMcGarry", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "WAGNER_MCGARRY_coefs", "=", "[", "A", ",", "B", ",", "C", ",", "D", "]", "Tmins", ".", "append", "(", "self", ".", "WAGNER_MCGARRY_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "WAGNER_MCGARRY_Tc", ")", "if", "self", ".", "CASRN", "in", "WagnerPoling", ".", "index", ":", "methods", ".", "append", "(", "WAGNER_POLING", ")", "_", ",", "A", ",", "B", ",", "C", ",", "D", ",", "self", ".", "WAGNER_POLING_Tc", ",", "self", ".", "WAGNER_POLING_Pc", ",", "Tmin", ",", "self", ".", "WAGNER_POLING_Tmax", "=", "_WagnerPoling_values", "[", "WagnerPoling", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "# Some Tmin values are missing; Arbitrary choice of 0.1 lower limit", "self", ".", "WAGNER_POLING_Tmin", "=", "Tmin", "if", "not", "np", ".", "isnan", "(", "Tmin", ")", "else", "self", ".", "WAGNER_POLING_Tmax", "*", "0.1", "self", ".", "WAGNER_POLING_coefs", "=", "[", "A", ",", "B", ",", "C", ",", "D", "]", "Tmins", ".", "append", "(", "Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "WAGNER_POLING_Tmax", ")", "if", "self", ".", "CASRN", "in", "AntoineExtended", ".", "index", ":", "methods", ".", "append", "(", "ANTOINE_EXTENDED_POLING", ")", "_", ",", "A", ",", "B", ",", "C", ",", "Tc", ",", "to", ",", "n", ",", "E", ",", "F", ",", "self", ".", "ANTOINE_EXTENDED_POLING_Tmin", ",", "self", ".", "ANTOINE_EXTENDED_POLING_Tmax", "=", "_AntoineExtended_values", "[", "AntoineExtended", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "ANTOINE_EXTENDED_POLING_coefs", "=", "[", "Tc", ",", "to", ",", "A", ",", "B", ",", "C", ",", "n", ",", "E", ",", "F", "]", "Tmins", ".", "append", "(", "self", ".", "ANTOINE_EXTENDED_POLING_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "ANTOINE_EXTENDED_POLING_Tmax", ")", "if", "self", ".", "CASRN", "in", "AntoinePoling", ".", "index", ":", "methods", ".", "append", "(", "ANTOINE_POLING", ")", "_", ",", "A", ",", "B", ",", "C", ",", "self", ".", "ANTOINE_POLING_Tmin", ",", "self", ".", "ANTOINE_POLING_Tmax", "=", "_AntoinePoling_values", "[", "AntoinePoling", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "ANTOINE_POLING_coefs", "=", "[", "A", ",", "B", ",", "C", "]", "Tmins", ".", "append", "(", "self", ".", "ANTOINE_POLING_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "ANTOINE_POLING_Tmax", ")", "if", "self", ".", "CASRN", "in", "Perrys2_8", ".", "index", ":", "methods", ".", "append", "(", "DIPPR_PERRY_8E", ")", "_", ",", "C1", ",", "C2", ",", "C3", ",", "C4", ",", "C5", ",", "self", ".", "Perrys2_8_Tmin", ",", "self", ".", "Perrys2_8_Tmax", "=", "_Perrys2_8_values", "[", "Perrys2_8", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "Perrys2_8_coeffs", "=", "[", "C1", ",", "C2", ",", "C3", ",", "C4", ",", "C5", "]", "Tmins", ".", "append", "(", "self", ".", 
"Perrys2_8_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "Perrys2_8_Tmax", ")", "if", "has_CoolProp", "and", "self", ".", "CASRN", "in", "coolprop_dict", ":", "methods", ".", "append", "(", "COOLPROP", ")", "self", ".", "CP_f", "=", "coolprop_fluids", "[", "self", ".", "CASRN", "]", "Tmins", ".", "append", "(", "self", ".", "CP_f", ".", "Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "CP_f", ".", "Tc", ")", "if", "self", ".", "CASRN", "in", "_VDISaturationDict", ":", "methods", ".", "append", "(", "VDI_TABULAR", ")", "Ts", ",", "props", "=", "VDI_tabular_data", "(", "self", ".", "CASRN", ",", "'P'", ")", "self", ".", "VDI_Tmin", "=", "Ts", "[", "0", "]", "self", ".", "VDI_Tmax", "=", "Ts", "[", "-", "1", "]", "self", ".", "tabular_data", "[", "VDI_TABULAR", "]", "=", "(", "Ts", ",", "props", ")", "Tmins", ".", "append", "(", "self", ".", "VDI_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_Tmax", ")", "if", "self", ".", "CASRN", "in", "VDI_PPDS_3", ".", "index", ":", "_", ",", "Tm", ",", "Tc", ",", "Pc", ",", "A", ",", "B", ",", "C", ",", "D", "=", "_VDI_PPDS_3_values", "[", "VDI_PPDS_3", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "VDI_PPDS_coeffs", "=", "[", "A", ",", "B", ",", "C", ",", "D", "]", "self", ".", "VDI_PPDS_Tc", "=", "Tc", "self", ".", "VDI_PPDS_Tm", "=", "Tm", "self", ".", "VDI_PPDS_Pc", "=", "Pc", "methods", ".", "append", "(", "VDI_PPDS", ")", "Tmins", ".", "append", "(", "self", ".", "VDI_PPDS_Tm", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_PPDS_Tc", ")", "if", "all", "(", "(", "self", ".", "Tb", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ")", ")", ":", "methods", ".", "append", "(", "BOILING_CRITICAL", ")", "Tmins", ".", "append", "(", "0.01", ")", "Tmaxs", ".", "append", "(", "self", ".", "Tc", ")", "if", "all", "(", "(", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "omega", ")", ")", ":", "methods", ".", "append", "(", "LEE_KESLER_PSAT", ")", "methods", ".", "append", "(", "AMBROSE_WALTON", ")", "methods", ".", "append", "(", "SANJARI", ")", "methods", ".", "append", "(", "EDALAT", ")", "if", "self", ".", "eos", ":", "methods", ".", "append", "(", "EOS", ")", "Tmins", ".", "append", "(", "0.01", ")", "Tmaxs", ".", "append", "(", "self", ".", "Tc", ")", "self", ".", "all_methods", "=", "set", "(", "methods", ")", "if", "Tmins", "and", "Tmaxs", ":", "self", ".", "Tmin", "=", "min", "(", "Tmins", ")", "self", ".", "Tmax", "=", "max", "(", "Tmaxs", ")" ]
58.756757
24.756757
def _set_fields(self, json_dict): """ Set this object's attributes specified in json_dict """ for key, value in json_dict.items(): if not key.startswith("_"): setattr(self, key, value)
[ "def", "_set_fields", "(", "self", ",", "json_dict", ")", ":", "for", "key", ",", "value", "in", "json_dict", ".", "items", "(", ")", ":", "if", "not", "key", ".", "startswith", "(", "\"_\"", ")", ":", "setattr", "(", "self", ",", "key", ",", "value", ")" ]
44.8
2.6
def _escape_token(token, alphabet): """Escape away underscores and OOV characters and append '_'. This allows the token to be expressed as the concatenation of a list of subtokens from the vocabulary. The underscore acts as a sentinel which allows us to invertibly concatenate multiple such lists. Args: token: A unicode string to be escaped. alphabet: A set of all characters in the vocabulary's alphabet. Returns: escaped_token: An escaped unicode string. Raises: ValueError: If the provided token is not unicode. """ if not isinstance(token, six.text_type): raise ValueError("Expected string type for token, got %s" % type(token)) token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] return u"".join(ret) + "_"
[ "def", "_escape_token", "(", "token", ",", "alphabet", ")", ":", "if", "not", "isinstance", "(", "token", ",", "six", ".", "text_type", ")", ":", "raise", "ValueError", "(", "\"Expected string type for token, got %s\"", "%", "type", "(", "token", ")", ")", "token", "=", "token", ".", "replace", "(", "u\"\\\\\"", ",", "u\"\\\\\\\\\"", ")", ".", "replace", "(", "u\"_\"", ",", "u\"\\\\u\"", ")", "ret", "=", "[", "c", "if", "c", "in", "alphabet", "and", "c", "!=", "u\"\\n\"", "else", "r\"\\%d;\"", "%", "ord", "(", "c", ")", "for", "c", "in", "token", "]", "return", "u\"\"", ".", "join", "(", "ret", ")", "+", "\"_\"" ]
35.913043
23.478261
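A hedged illustration of the escaping behaviour described above, with a toy alphabet; the outputs follow directly from the replacement rules in the function (underscore becomes "\u", out-of-alphabet characters become "\<ord>;", and a sentinel "_" is appended):

alphabet = set(u"abcu\\")                  # characters the vocabulary can represent
print(_escape_token(u"ab_c", alphabet))    # -> ab\uc_   (underscore escaped, trailing sentinel added)
print(_escape_token(u"a!b", alphabet))     # -> a\33;b_  (ord('!') == 33, out-of-alphabet char escaped)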
def read_tmy3(filename=None, coerce_year=None, recolumn=True): ''' Read a TMY3 file into a pandas dataframe. Note that values contained in the metadata dictionary are unchanged from the TMY3 file (i.e. units are retained). In the case of any discrepancies between this documentation and the TMY3 User's Manual [1], the TMY3 User's Manual takes precedence. The TMY3 files were updated in Jan. 2015. This function requires the use of the updated files. Parameters ---------- filename : None or string, default None If None, attempts to use a Tkinter file browser. A string can be a relative file path, absolute file path, or url. coerce_year : None or int, default None If supplied, the year of the data will be set to this value. recolumn : bool, default True If True, apply standard names to TMY3 columns. Typically this results in stripping the units from the column name. Returns ------- Tuple of the form (data, metadata). data : DataFrame A pandas dataframe with the columns described in the table below. For more detailed descriptions of each component, please consult the TMY3 User's Manual ([1]), especially tables 1-1 through 1-6. metadata : dict The site metadata available in the file. Notes ----- The returned structures have the following fields. =============== ====== =================== key format description =============== ====== =================== altitude Float site elevation latitude Float site latitude longitude Float site longitude Name String site name State String state TZ Float UTC offset USAF Int USAF identifier =============== ====== =================== ============================= ====================================================================================================================================================== TMYData field description ============================= ====================================================================================================================================================== TMYData.Index A pandas datetime index. NOTE, the index is localized to the site's fixed UTC offset, and times are set to local standard time (daylight savings is not included) TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHISource See [1], Table 1-4 TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.DNISource See [1], Table 1-4 TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.DHISource See [1], Table 1-4 TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.GHillumSource See [1], Table 1-4 TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DNillumSource See [1], Table 1-4 TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHillum Avg.
horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DHillumSource See [1], Table 1-4 TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2 TMYData.ZenithlumSource See [1], Table 1-4 TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10 TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.TotCldUnertainty See [1], Table 1-6 TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.OpqCldUncertainty See [1], Table 1-6 TMYData.DryBulb Dry bulb temperature at the time indicated, deg C TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DryBulbUncertainty See [1], Table 1-6 TMYData.DewPoint Dew-point temperature at the time indicated, deg C TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DewPointUncertainty See [1], Table 1-6 TMYData.RHum Relative humidity at the time indicated, percent TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.RHumUncertainty See [1], Table 1-6 TMYData.Pressure Station pressure at the time indicated, 1 mbar TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PressureUncertainty See [1], Table 1-6 TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined, calm) TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WdirUncertainty See [1], Table 1-6 TMYData.Wspd Wind speed at the time indicated, meter/second TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WspdUncertainty See [1], Table 1-6 TMYData.Hvis Distance to discernible remote objects at time indicated (7777=unlimited), meter TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.HvisUncertainty See [1], Table 1-6 TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.CeilHgtUncertainty See [1], Table 1-6 TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PwatUncertainty See [1], Table 1-6 TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AODUncertainty See [1], Table 1-6 TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AlbUncertainty See [1], Table 1-6 TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.LprecipUncertainty See [1], Table 1-6 TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2]. TMYData.PresWthUncertainty Present weather code uncertainty, see [2]. ============================= ====================================================================================================================================================== References ---------- [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets". NREL/TP-581-43156, Revised May 2008. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005 Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364. ''' if filename is None: try: filename = _interactive_load() except ImportError: raise ImportError('Interactive load failed. Tkinter not supported ' 'on this system. Try installing X-Quartz and ' 'reloading') head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude'] if filename.startswith('http'): request = Request(filename, headers={'User-Agent': ( 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 ' 'Safari/537.36')}) response = urlopen(request) csvdata = io.StringIO(response.read().decode(errors='ignore')) else: # assume it's accessible via the file system csvdata = open(filename, 'r') # read in file metadata, advance buffer to second line firstline = csvdata.readline() if 'Request Rejected' in firstline: raise IOError('Remote server rejected TMY file request') meta = dict(zip(head, firstline.rstrip('\n').split(","))) # convert metadata strings to numeric types meta['altitude'] = float(meta['altitude']) meta['latitude'] = float(meta['latitude']) meta['longitude'] = float(meta['longitude']) meta['TZ'] = float(meta['TZ']) meta['USAF'] = int(meta['USAF']) # use pandas to read the csv file/stringio buffer # header is actually the second line in file, but tell pandas to look for # header information on the 1st line (0 indexing) because we've already # advanced past the true first line with the readline call above. data = pd.read_csv( csvdata, header=0, parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']}, date_parser=lambda *x: _parsedate(*x, year=coerce_year), index_col='datetime') if recolumn: data = _recolumn(data) # rename to standard column names data = data.tz_localize(int(meta['TZ'] * 3600)) return data, meta
[ "def", "read_tmy3", "(", "filename", "=", "None", ",", "coerce_year", "=", "None", ",", "recolumn", "=", "True", ")", ":", "if", "filename", "is", "None", ":", "try", ":", "filename", "=", "_interactive_load", "(", ")", "except", "ImportError", ":", "raise", "ImportError", "(", "'Interactive load failed. Tkinter not supported '", "'on this system. Try installing X-Quartz and '", "'reloading'", ")", "head", "=", "[", "'USAF'", ",", "'Name'", ",", "'State'", ",", "'TZ'", ",", "'latitude'", ",", "'longitude'", ",", "'altitude'", "]", "if", "filename", ".", "startswith", "(", "'http'", ")", ":", "request", "=", "Request", "(", "filename", ",", "headers", "=", "{", "'User-Agent'", ":", "(", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '", "'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '", "'Safari/537.36'", ")", "}", ")", "response", "=", "urlopen", "(", "request", ")", "csvdata", "=", "io", ".", "StringIO", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "errors", "=", "'ignore'", ")", ")", "else", ":", "# assume it's accessible via the file system", "csvdata", "=", "open", "(", "filename", ",", "'r'", ")", "# read in file metadata, advance buffer to second line", "firstline", "=", "csvdata", ".", "readline", "(", ")", "if", "'Request Rejected'", "in", "firstline", ":", "raise", "IOError", "(", "'Remote server rejected TMY file request'", ")", "meta", "=", "dict", "(", "zip", "(", "head", ",", "firstline", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "\",\"", ")", ")", ")", "# convert metadata strings to numeric types", "meta", "[", "'altitude'", "]", "=", "float", "(", "meta", "[", "'altitude'", "]", ")", "meta", "[", "'latitude'", "]", "=", "float", "(", "meta", "[", "'latitude'", "]", ")", "meta", "[", "'longitude'", "]", "=", "float", "(", "meta", "[", "'longitude'", "]", ")", "meta", "[", "'TZ'", "]", "=", "float", "(", "meta", "[", "'TZ'", "]", ")", "meta", "[", "'USAF'", "]", "=", "int", "(", "meta", "[", "'USAF'", "]", ")", "# use pandas to read the csv file/stringio buffer", "# header is actually the second line in file, but tell pandas to look for", "# header information on the 1st line (0 indexing) because we've already", "# advanced past the true first line with the readline call above.", "data", "=", "pd", ".", "read_csv", "(", "csvdata", ",", "header", "=", "0", ",", "parse_dates", "=", "{", "'datetime'", ":", "[", "'Date (MM/DD/YYYY)'", ",", "'Time (HH:MM)'", "]", "}", ",", "date_parser", "=", "lambda", "*", "x", ":", "_parsedate", "(", "*", "x", ",", "year", "=", "coerce_year", ")", ",", "index_col", "=", "'datetime'", ")", "if", "recolumn", ":", "data", "=", "_recolumn", "(", "data", ")", "# rename to standard column names", "data", "=", "data", ".", "tz_localize", "(", "int", "(", "meta", "[", "'TZ'", "]", "*", "3600", ")", ")", "return", "data", ",", "meta" ]
60.031414
36.670157
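A brief, hedged usage sketch for the function above; the filename is hypothetical, and any TMY3 CSV (local path or URL) should work the same way:

data, metadata = read_tmy3('723170TYA.CSV', coerce_year=1990)
print(metadata['Name'], metadata['latitude'], metadata['longitude'])
print(data['GHI'].head())  # with recolumn=True, columns carry the standard names from the table above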
def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None): """ Third order derivative of the log-likelihood function at y given link(f) w.r.t link(f) .. math:: \\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)^{3}} = \\frac{2(v+1)(y_{i} - \\lambda(f_{i}))\\left((y_{i} - \\lambda(f_{i}))^{2} - 3\\sigma^{2} v\\right)}{\\left((y_{i} - \\lambda(f_{i}))^{2} + \\sigma^{2} v\\right)^{3}} :param inv_link_f: latent variables link(f) :type inv_link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in student t distribution :returns: third derivative of likelihood evaluated at points f :rtype: Nx1 array """ e = y - inv_link_f d3lik_dlink3 = ( -(2*(self.v + 1)*(-e)*(e**2 - 3*self.v*self.sigma2)) / ((e**2 + self.sigma2*self.v)**3) ) return d3lik_dlink3
[ "def", "d3logpdf_dlink3", "(", "self", ",", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "e", "=", "y", "-", "inv_link_f", "d3lik_dlink3", "=", "(", "-", "(", "2", "*", "(", "self", ".", "v", "+", "1", ")", "*", "(", "-", "e", ")", "*", "(", "e", "**", "2", "-", "3", "*", "self", ".", "v", "*", "self", ".", "sigma2", ")", ")", "/", "(", "(", "e", "**", "2", "+", "self", ".", "sigma2", "*", "self", ".", "v", ")", "**", "3", ")", ")", "return", "d3lik_dlink3" ]
45.45
28.45
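The closed form above can be sanity-checked numerically against a finite difference of the second derivative of the Student-t log-pdf. A self-contained sketch with arbitrarily chosen values; the second-derivative expression d2 is derived here from the same log-pdf and is an assumption, not part of the record above:

v, sigma2 = 4.0, 0.5
d2 = lambda e: (v + 1) * (e**2 - sigma2 * v) / (e**2 + sigma2 * v)**2              # second derivative w.r.t. link(f)
d3 = lambda e: 2 * (v + 1) * e * (e**2 - 3 * sigma2 * v) / (e**2 + sigma2 * v)**3  # closed form from the record

e, h = 0.7, 1e-6
# since e = y - lambda(f), d/dlambda = -d/de; the two printed values should agree to several decimal places
print(d3(e), -(d2(e + h) - d2(e - h)) / (2 * h))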
def load_config(name='urls.conf'): """Load a config from a resource file. The resource is found using `pkg_resources.resource_stream()`_, relative to the calling module. See :func:`parse_config` for config file details. :param name: The name of the resource, relative to the calling module. .. _pkg_resources.resource_stream(): http://packages.python.org/distribute/pkg_resources.html#basic-resource-access """ module = _calling_scope(2) config = resource_stream(module.__name__, name) return parse_config(config, module)
[ "def", "load_config", "(", "name", "=", "'urls.conf'", ")", ":", "module", "=", "_calling_scope", "(", "2", ")", "config", "=", "resource_stream", "(", "module", ".", "__name__", ",", "name", ")", "return", "parse_config", "(", "config", ",", "module", ")" ]
36.733333
23.066667
def first_field(self): """ Returns the first :class:`Field` in the `Sequence` or ``None`` for an empty `Sequence`. """ for name, item in enumerate(self): # Container if is_container(item): field = item.first_field() # Container is not empty if field is not None: return field # Field elif is_field(item): return item else: raise MemberTypeError(self, item, name) return None
[ "def", "first_field", "(", "self", ")", ":", "for", "name", ",", "item", "in", "enumerate", "(", "self", ")", ":", "# Container", "if", "is_container", "(", "item", ")", ":", "field", "=", "item", ".", "first_field", "(", ")", "# Container is not empty", "if", "field", "is", "not", "None", ":", "return", "field", "# Field", "elif", "is_field", "(", "item", ")", ":", "return", "item", "else", ":", "raise", "MemberTypeError", "(", "self", ",", "item", ",", "name", ")", "return", "None" ]
32.823529
9.705882
def replace(self, re_text, replace_str, text): """ Regular-expression replacement. :param re_text: the regular expression pattern :param replace_str: the replacement string :param text: the text to search :return: the text after replacement """ return re.sub(re_text, replace_str, text)
[ "def", "replace", "(", "self", ",", "re_text", ",", "replace_str", ",", "text", ")", ":", "return", "re", ".", "sub", "(", "re_text", ",", "replace_str", ",", "text", ")" ]
27
9.888889
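A one-line example for the wrapper above; `helper` stands for any instance of the class, and the semantics are exactly those of re.sub:

helper.replace(r"\d+", "#", "room 42, floor 7")   # -> 'room #, floor #'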
def index(self, sub, start=None, end=None): """Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position. """ return self.value_no_colors.index(sub, start, end)
[ "def", "index", "(", "self", ",", "sub", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "return", "self", ".", "value_no_colors", ".", "index", "(", "sub", ",", "start", ",", "end", ")" ]
42
10.875
def load_csv(filename, dialect='excel', encoding='utf-8'): """Load and return formal context from CSV file. Args: filename: Path to the CSV file to load the context from. dialect: Syntax variant of the CSV file (``'excel'``, ``'excel-tab'``). encoding (str): Encoding of the file (``'utf-8'``, ``'latin1'``, ``'ascii'``, ...). Example: >>> load_csv('examples/vowels.csv') # doctest: +ELLIPSIS <Context object mapping 12 objects to 8 properties [a717eee4] at 0x...> """ return Context.fromfile(filename, 'csv', encoding, dialect=dialect)
[ "def", "load_csv", "(", "filename", ",", "dialect", "=", "'excel'", ",", "encoding", "=", "'utf-8'", ")", ":", "return", "Context", ".", "fromfile", "(", "filename", ",", "'csv'", ",", "encoding", ",", "dialect", "=", "dialect", ")" ]
45.153846
28.153846
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None): """Submit work to be executed in the background. :param work: The work to execute. :param workunit_parent: If specified, work is accounted for under this workunit. :param on_success: If specified, a callable taking a single argument, which will be a list of return values of each invocation, in order. Called only if all work succeeded. :param on_failure: If specified, a callable taking a single argument, which is an exception thrown in the work. :return: `multiprocessing.pool.MapResult` Don't do work in on_success: not only will it block the result handling thread, but that thread is not a worker and doesn't have a logging context etc. Use it just to submit further work to the pool. """ if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables. if on_success: on_success([]) else: def do_work(*args): self._do_work(work.func, *args, workunit_name=work.workunit_name, workunit_parent=workunit_parent, on_failure=on_failure) return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success)
[ "def", "submit_async_work", "(", "self", ",", "work", ",", "workunit_parent", "=", "None", ",", "on_success", "=", "None", ",", "on_failure", "=", "None", ")", ":", "if", "work", "is", "None", "or", "len", "(", "work", ".", "args_tuples", ")", "==", "0", ":", "# map_async hangs on 0-length iterables.", "if", "on_success", ":", "on_success", "(", "[", "]", ")", "else", ":", "def", "do_work", "(", "*", "args", ")", ":", "self", ".", "_do_work", "(", "work", ".", "func", ",", "*", "args", ",", "workunit_name", "=", "work", ".", "workunit_name", ",", "workunit_parent", "=", "workunit_parent", ",", "on_failure", "=", "on_failure", ")", "return", "self", ".", "_pool", ".", "map_async", "(", "do_work", ",", "work", ".", "args_tuples", ",", "chunksize", "=", "1", ",", "callback", "=", "on_success", ")" ]
52.541667
31.25
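A hedged sketch of driving the method above. It only touches work.func, work.args_tuples and work.workunit_name, so a namedtuple stands in for the real Work type, and `pool` stands for the surrounding executor instance (both are assumptions for illustration):

from collections import namedtuple

# Stand-in carrying just the three attributes the method reads.
Work = namedtuple("Work", ["func", "args_tuples", "workunit_name"])
work = Work(func=print, args_tuples=[("a",), ("b",)], workunit_name="demo")

# on_success receives the list of per-invocation return values, in order
pool.submit_async_work(work, on_success=lambda results: print("done:", results))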
def _set_rbridge_id(self, v, load=False): """ Setter method for rbridge_id, mapped from YANG variable /preprovision/rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("rbridge_id wwn",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id wwn', extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rbridge_id must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("rbridge_id wwn",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id wwn', extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='list', is_config=True)""", }) self.__rbridge_id = t if hasattr(self, '_set'): self._set()
[ "def", "_set_rbridge_id", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"rbridge_id wwn\"", ",", "rbridge_id", ".", "rbridge_id", ",", "yang_name", "=", "\"rbridge-id\"", ",", "rest_name", "=", "\"rbridge-id\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'rbridge-id wwn'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Rbridge Id for Pre-provision configuration'", ",", "u'callpoint'", ":", "u'switch_attributes_callpoint'", ",", "u'display-when'", ":", "u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))'", ",", "u'cli-mode-name'", ":", "u'config-preprovision-rbridge-id-$(rbridge-id)'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"rbridge-id\"", ",", "rest_name", "=", "\"rbridge-id\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Rbridge Id for Pre-provision configuration'", ",", "u'callpoint'", ":", "u'switch_attributes_callpoint'", ",", "u'display-when'", ":", "u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))'", ",", "u'cli-mode-name'", ":", "u'config-preprovision-rbridge-id-$(rbridge-id)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-preprovision'", ",", "defining_module", "=", "'brocade-preprovision'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"rbridge_id must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"rbridge_id wwn\",rbridge_id.rbridge_id, yang_name=\"rbridge-id\", rest_name=\"rbridge-id\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id wwn', extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}), is_container='list', yang_name=\"rbridge-id\", rest_name=\"rbridge-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__rbridge_id", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
135.090909
64.954545