Columns: code (strings, 52 to 7.75k characters), docs (strings, 1 to 5.85k characters)
def split_list_by(lst, key): first, second = [], [] for item in lst: if key(item): second.append(item) else: first.append(item) return (first, second)
Splits a list using the callable *key*: items for which *key* returns a falsy value are put into the first list, and items with a truthy result into the second list.
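A brief usage sketch, assuming the `split_list_by()` function from the snippet above is in scope:

```python
# Items for which key() is truthy go to the second list, the rest to the first.
evens, odds = split_list_by([1, 2, 3, 4, 5], lambda n: n % 2 == 1)
assert evens == [2, 4]
assert odds == [1, 3, 5]
```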
def reraise(tpe, value, tb=None): " Reraise an exception from an exception info tuple. " Py3 = (sys.version_info[0] == 3) if value is None: value = tpe() if Py3: if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: exec('raise tpe, value, tb')
Reraise an exception from an exception info tuple.
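A hedged usage sketch, assuming `reraise()` from the snippet above is importable: capture `sys.exc_info()` where the exception happens and re-raise it later with the original traceback.

```python
import sys

captured = None
try:
    1 / 0
except ZeroDivisionError:
    captured = sys.exc_info()  # (type, value, traceback)

# ... later, on another code path ...
if captured is not None:
    reraise(*captured)  # re-raises the ZeroDivisionError with its original traceback
```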
def result(self): if self.__cancelled: raise Job.Cancelled elif self.__state in (Job.PENDING, Job.RUNNING): raise Job.InvalidState('job is {0}'.format(self.__state)) elif self.__state == Job.ERROR: reraise(*self.__exception) elif self.__state == Job.SUCCESS: return self.__result else: raise RuntimeError('invalid job state {0!r}'.format(self.__state))
The result of the job's execution. Accessing this property while the job is pending or running will raise #InvalidState. If an exception occurred during the job's execution, it will be raised. # Raises InvalidState: If the job is not in state #FINISHED. Cancelled: If the job was cancelled. any: If an exception occurred during the job's execution.
def exception(self): if self.__state in (Job.PENDING, Job.RUNNING): raise self.InvalidState('job is {0}'.format(self.__state)) elif self.__state == Job.ERROR: assert self.__exception is not None return self.__exception elif self.__state in (Job.RUNNING, Job.SUCCESS, Job.CANCELLED): assert self.__exception is None return None else: raise RuntimeError('invalid job state {0!r}'.format(self.__state))
The exception that occurred while the job executed. The value is #None if no exception occurred. # Raises InvalidState: If the job is #PENDING or #RUNNING.
def finished(self): return self.__state in (Job.ERROR, Job.SUCCESS, Job.CANCELLED)
True if the job ran and finished. It makes no difference whether the job finished successfully or errored.
def get(self, default=None): if not self.__cancelled and self.__state == Job.SUCCESS: return self.__result else: return default
Get the result of the Job, or return *default* if the job is not finished or errored. This function will never explicitly raise an exception. Note that the *default* value is also returned if the job was cancelled. # Arguments default (any): The value to return when the result can not be obtained.
def cancel(self): with synchronized(self): cancelled = self.__cancelled if not cancelled: self.__cancelled = True notify_all(self) if not cancelled: self._trigger_event(Job.CANCELLED)
Cancels the job. Functions should check the #Job.cancelled flag from time to time to be able to abort pre-emptively if the job was cancelled instead of running forever.
def _trigger_event(self, event): if event is None or event not in self.__listeners: raise ValueError('invalid event type: {0!r}'.format(event)) # Check the event has not already been triggered, then mark # the event as triggered. if event in self.__event_set: raise RuntimeError('event already triggered: {0!r}'.format(event)) self.__event_set.add(event) listeners = self.__listeners[event] + self.__listeners[None] # Remove one-off listeners. self.__listeners[event][:] = (l for l in self.__listeners[event] if not l.once) self.__listeners[None][:] = (l for l in self.__listeners[None] if not l.once) for listener in listeners: # XXX: What to do on exceptions? Catch and make sure all listeners # run through? What to do with the exception(s) then? listener.callback(self, event)
Private. Triggers an event and removes all one-off listeners for that event.
def add_listener(self, event, callback, once=False): if not callable(callback): raise TypeError('callback must be callable') if isinstance(event, str): event = [event] for evn in event: if evn not in self.__listeners: raise ValueError('invalid event type: {0!r}'.format(evn)) for evn in event: event_passed = False with synchronized(self): event_passed = (evn in self.__event_set) if not (once and event_passed): self.__listeners[evn].append(Job._Listener(callback, once)) # If the event already happened, we'll invoke the callback # immediately to make up for what it missed. Pass the single event # name, matching how _trigger_event() invokes listeners. if event_passed: callback(self, evn)
Register a *callback* for the specified *event*. The function will be called with the #Job as its first argument. If *once* is #True, the listener will be removed after it has been invoked once or when the job is re-started. Note that if the event already occurred, *callback* will be called immediately! # Arguments event (str, list of str): The name or multiple names of an event, or None to register the callback to be called for any event. callback (callable): A function. once (bool): Whether the callback is valid only once.
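A minimal listener sketch, assuming the `Job` class from this module; using `Job.SUCCESS` as an event name is an assumption based on the state constants that appear in the code above.

```python
def on_success(job, event):
    # Called with the Job as first argument once the job has finished.
    print('job finished with result:', job.get())

job = Job(task=lambda j: 42)          # task receives the Job as first argument
job.add_listener(Job.SUCCESS, on_success, once=True)
job.start()
```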
def wait(self, timeout=None): def cond(self): return self.__state not in (Job.PENDING, Job.RUNNING) or self.__cancelled if not wait_for_condition(self, cond, timeout): raise Job.Timeout return self.result
Waits for the job to finish and returns the result. # Arguments timeout (number, None): A number of seconds to wait for the result before raising a #Timeout exception. # Raises Timeout: If the timeout limit is exceeded.
def run(self): if self.__target is not None: return self.__target(self, *self.__args, **self.__kwargs) raise NotImplementedError
This method is the actual implementation of the job. By default, it calls the target function specified in the #Job constructor.
def factory(start_immediately=True): def decorator(func): def wrapper(*args, **kwargs): job = Job(task=lambda j: func(j, *args, **kwargs)) if start_immediately: job.start() return job return wrapper return decorator
This is a decorator function that creates new `Job`s with the wrapped function as the target. # Example ```python @Job.factory() def some_longish_function(job, seconds): time.sleep(seconds) return 42 job = some_longish_function(2) print(job.wait()) ``` # Arguments start_immediately (bool): #True if the factory should call #Job.start() immediately, #False if it should return the job in pending state.
def start(self): if self.__running: raise RuntimeError('ThreadPool already running') [t.start() for t in self.__threads] self.__running = True
Starts the #ThreadPool. Must be ended with #stop(). Use the context-manager interface to ensure starting and stopping the #ThreadPool.
def current_jobs(self): jobs = [] with synchronized(self.__queue): for worker in self.__threads: with synchronized(worker): if worker.current: jobs.append(worker.current) return jobs
Returns a snapshot of the Jobs that are currently being processed by the ThreadPool. These jobs can not be found in the #pending_jobs() list.
def clear(self): with synchronized(self.__queue): jobs = self.__queue.snapshot() self.__queue.clear() return jobs
Removes all pending Jobs from the queue and returns them in a list. This method does **not** call #Job.cancel() on any of the jobs. If you want that, use #cancel_all() or call it manually.
def cancel_all(self, cancel_current=True): with synchronized(self.__queue): jobs = self.clear() if cancel_current: jobs.extend(self.current_jobs()) [j.cancel() for j in jobs] return jobs
Similar to #clear(), but this function also calls #Job.cancel() on all jobs. Also, it **includes** all jobs that are currently being executed if *cancel_current* is True. # Arguments cancel_current (bool): Also cancel currently running jobs and include them in the returned list of jobs. # Returns list: A list of the #Job#s that were canceled.
def submit(self, target=None, task=None, args=(), kwargs=None, front=False, dispose_inputs=None): if not self.__running: raise RuntimeError("ThreadPool ain't running") if dispose_inputs is None: dispose_inputs = self.dispose_inputs if isinstance(task, Job): if args or kwargs: raise TypeError('can not provide additional arguments for Job') if task.state != Job.PENDING: raise RuntimeError('job is not pending') job = task elif task is not None: if kwargs is None: kwargs = {} job = Job(task=task, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs) elif target is not None: if kwargs is None: kwargs = {} job = Job(target=target, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs) else: raise TypeError('expected Job or callable') job.print_exc = self.print_exc if front: self.__queue.appendleft(job) else: self.__queue.append(job) return job
Submit a new #Job to the ThreadPool. # Arguments task (function, Job): Either a function that accepts a #Job, *args* and *kwargs* or a #Job object that is in #~Job.PENDING state. target (function): A function object that accepts *args* and *kwargs*. Only if *task* is not specified. args (list, tuple): A list of arguments to be passed to *job*, if it is a function. kwargs (dict): A dictionary to be passed as keyword arguments to *job*, if it is a function. front (bool): If #True, the job will be inserted in the front of the queue. # Returns Job: The job that was added to the queue. # Raises TypeError: If a #Job object was passed but *args* or *kwargs* are non-empty. RuntimeError: If the ThreadPool is not running (ie. if it was shut down).
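A hedged end-to-end sketch; the `ThreadPool` constructor arguments are not shown in the snippets above, so a no-argument construction is assumed here.

```python
pool = ThreadPool()                    # hypothetical: real constructor args not shown
pool.start()
try:
    job = pool.submit(task=lambda j: sum(range(1000)))
    print(job.wait(timeout=10))        # blocks until the job finishes, then returns 499500
finally:
    pool.shutdown(wait=True)
```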
def wait(self, timeout=None): if not self.__running: raise RuntimeError("ThreadPool ain't running") self.__queue.wait(timeout)
Block until all jobs in the ThreadPool are finished. Beware that this can make the program run into a deadlock if another thread adds new jobs to the pool! # Raises Timeout: If the timeout is exceeded.
def shutdown(self, wait=True): if self.__running: # Add a Non-entry for every worker thread we have. for thread in self.__threads: assert thread.isAlive() self.__queue.append(None) self.__running = False if wait: self.__queue.wait() for thread in self.__threads: thread.join()
Shut down the ThreadPool. # Arguments wait (bool): If #True, wait until all worker threads end. Note that pending jobs are still executed. If you want to cancel any pending jobs, use the #clear() or #cancel_all() methods.
def submit_multiple(self, functions, target=False, task=False): if target or not task: return JobCollection([self.submit(target=func) for func in functions]) else: return JobCollection([self.submit(task=func) for func in functions])
Submits a #Job for each element in *functions* and returns a #JobCollection.
def new_event_type(self, name, mergeable=False): ''' Declare a new event. May overwrite an existing entry. ''' self.event_types[name] = self.EventType(name, mergeable)
Declare a new event. May overwrite an existing entry.
def add_event(self, name, data=None): ''' Add an event of type *name* to the queue. May raise a `ValueError` if the event type is mergeable and *data* is not None or if *name* is not a declared event type (in strict mode). ''' try: mergeable = self.event_types[name].mergeable except KeyError: if self.strict: raise ValueError('unknown event type {0!r}'.format(name)) mergeable = False if mergeable and data is not None: raise ValueError('mergable event can not have data attached') with self.lock: if mergeable: # Check if such an event already exists. for ev in self.events: if ev.type == name: return self.events.append(self.Event(name, data, time.clock()))
Add an event of type *name* to the queue. May raise a `ValueError` if the event type is mergeable and *data* is not None or if *name* is not a declared event type (in strict mode).
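A hypothetical usage sketch; the name `EventQueue` for the owning class and the `type`/`data` attributes of its event objects are assumptions based on the code above.

```python
queue = EventQueue()                      # hypothetical class name
queue.new_event_type('tick', mergeable=True)
queue.new_event_type('payload')

queue.add_event('tick')                   # mergeable events are queued at most once
queue.add_event('payload', {'value': 1})

for event in queue.pop_events():          # drains the queue under a single lock
    print(event.type, event.data)
```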
def pop_event(self): ''' Pop the next queued event from the queue. :raise ValueError: If there is no event queued. ''' with self.lock: if not self.events: raise ValueError('no events queued') return self.events.popleft()
Pop the next queued event from the queue. :raise ValueError: If there is no event queued.
def pop_events(self): ''' Pop all events and return a `collections.deque` object. The returned container can be empty. This method is preferred over `pop_event()` as it is much faster as the lock has to be acquired only once and also avoids running into an infinite loop during event processing. ''' with self.lock: events = self.events self.events = collections.deque() return events
Pop all events and return a `collections.deque` object. The returned container can be empty. This method is preferred over `pop_event()` as it is much faster as the lock has to be acquired only once and also avoids running into an infinite loop during event processing.
def clear(self): self._tasks -= len(self._deque) self._deque.clear() notify_all(self)
Clears the queue. Note that calling #wait() immediately after #clear() can still block when tasks are currently being processed since this method can only clear queued items.
def get(self, block=True, timeout=None, method='pop'): if method not in ('pop', 'popleft'): raise ValueError('method must be "pop" or "popleft": {0!r}'.format(method)) t_start = time.clock() while not self: if not block: raise self.Empty if timeout is None: wait(self) else: t_delta = time.clock() - t_start if t_delta > timeout: raise Timeout wait(self, timeout - t_delta) return getattr(self, method)()
If *block* is True, this method blocks until an element can be removed from the deque with the specified *method*. If *block* is False, the function will raise #Empty if no elements are available. # Arguments block (bool): #True to block and wait until an element becomes available, #False otherwise. timeout (number, None): The timeout in seconds to use when waiting for an element (only with `block=True`). method (str): The name of the method to use to remove an element from the queue. Must be either `'pop'` or `'popleft'`. # Raises ValueError: If *method* has an invalid value. Timeout: If the *timeout* is exceeded.
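A short sketch of the blocking semantics; the synchronized deque class shown above is referred to as `SynchronizedDeque` here purely for illustration.

```python
queue = SynchronizedDeque()               # hypothetical name for the class above
queue.append('work item')

item = queue.get(block=True, timeout=5.0, method='popleft')   # FIFO order
try:
    queue.get(block=False)                # raises Empty when nothing is queued
except queue.Empty:
    pass
```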
def wait(self, timeout=None): t_start = time.clock() if not wait_for_condition(self, lambda s: s._tasks == 0, timeout): raise Timeout
Waits until all tasks completed or *timeout* seconds passed. # Raises Timeout: If the *timeout* is exceeded.
def sleep(self): current = time.time() if self.last < 0: self.last = current return delta = current - self.last if delta < self.seconds: time.sleep(self.seconds - delta) self.last = time.time()
Sleeps until the interval has passed since the last time this function was called. This is a synonym for #__call__(). The first time the function is called, it returns immediately and does not block. Therefore, it is important to put the call at the beginning of the timed block, like this: # Example ```python clock = Clock(fps=50) while True: clock.sleep() # Processing ... ```
def read_config(desired_type: Type[ConfigParser], file_object: TextIOBase, logger: Logger, *args, **kwargs) -> ConfigParser: # see https://docs.python.org/3/library/configparser.html for details config = ConfigParser() config.read_file(file_object) return config
Helper method to read a configuration file according to the 'configparser' format, and return the resulting ConfigParser object. :param file_object: :return:
def get_default_config_parsers() -> List[AnyParser]: return [SingleFileParserFunction(parser_function=read_config, streaming_mode=True, supported_exts={'.cfg', '.ini'}, supported_types={ConfigParser}), ]
Utility method to return the default parsers able to parse a dictionary from a file. :return:
def config_to_dict_of_dict(desired_type: Type[T], config: ConfigParser, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> DictOfDict: # return dict(config) # get the base collection type if provided base_typ, discarded = _extract_collection_base_type(desired_type, exception_if_none=False) # if none, at least declare dict base_typ = base_typ or Dict # convert the whole config to a dictionary by flattening all sections. If a key is found twice in two different # sections an error is raised results = dict() for section, props in config.items(): # convert all values of the sub-dictionary results[section] = ConversionFinder.convert_collection_values_according_to_pep(props, base_typ, conversion_finder, logger, **kwargs) return results
Helper method to read a configuration file according to the 'configparser' format, and return it as a dictionary of dictionaries [section > [property > value]]. :param file_object: :return:
def merge_all_config_sections_into_a_single_dict(desired_type: Type[T], config: ConfigParser, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]: # convert the whole config to a dictionary by flattening all sections. If a key is found twice in two different # sections an error is raised results = dict() for section, props in config.items(): for key, value in props.items(): if key in results.keys(): # find all sections where it appears sections_where_it_appears = [s for s, p in config.items() if key in p.keys()] raise MultipleKeyOccurenceInConfigurationError.create(key, sections_where_it_appears) else: results[key] = value return ConversionFinder.convert_collection_values_according_to_pep(results, desired_type, conversion_finder, logger, **kwargs)
Helper method to convert a 'configparser' into a dictionary [property > value]. Properties from all sections are collected. If the same key appears in several sections, an error will be thrown :param file_object: :return:
def get_default_config_converters(conv_finder: ConversionFinder) -> List[Union[Converter[Any, ConfigParser], Converter[ConfigParser, Any]]]: return [ConverterFunction(ConfigParser, DictOfDict, config_to_dict_of_dict, custom_name='config_to_dict_of_dict', function_args={'conversion_finder': conv_finder}), ConverterFunction(ConfigParser, dict, merge_all_config_sections_into_a_single_dict, custom_name='merge_all_config_sections_into_a_single_dict', function_args={'conversion_finder': conv_finder})]
Utility method to return the default converters associated to ConfigParser (from ConfigParser to other type, and from other type to ConfigParser) :return:
def create(key_name: str, sections: List[str]): # -> NoParserFoundForObject: return MultipleKeyOccurenceInConfigurationError('Cannot read the provided config file as a flat dictionary : ' 'key \'' + key_name + '\' appears several times, in sections' '\'' + str(sections) + '\'.')
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param key_name: :param sections: :return:
def logger(message, level=10): logging.getLogger(__name__).log(level, str(message))
Handle logging.
async def get_data(self): try: await self.get_session_data() await self.get_home_data() await self.get_users() await self.get_user_data() except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror): msg = "Can not load data from Tautulli." logger(msg, 40)
Get Tautulli data.
async def get_session_data(self): cmd = 'get_activity' url = self.base_url + cmd try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger("Status from Tautulli: " + str(response.status)) self.tautulli_session_data = await response.json() logger(self.tautulli_session_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = "Can not load data from Tautulli: {} - {}".format(url, error) logger(msg, 40)
Get Tautulli sessions.
async def get_home_data(self): cmd = 'get_home_stats' url = self.base_url + cmd data = {} try: async with async_timeout.timeout(8, loop=self._loop): request = await self._session.get(url) response = await request.json() for stat in response.get('response', {}).get('data', {}): if stat.get('stat_id') == 'top_movies': try: row = stat.get('rows', {})[0] data['movie'] = row.get('title') except (IndexError, KeyError): data['movie'] = None if stat.get('stat_id') == 'top_tv': try: row = stat.get('rows', {})[0] data['tv'] = row.get('title') except (IndexError, KeyError): data['tv'] = None if stat.get('stat_id') == 'top_users': try: row = stat.get('rows', {})[0] data['user'] = row.get('user') except (IndexError, KeyError): data['user'] = None logger("Status from Tautulli: " + str(request.status)) self.tautulli_home_data = data logger(self.tautulli_home_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = "Can not load data from Tautulli: {} - {}".format(url, error) logger(msg, 40)
Get Tautulli home stats.
async def get_users(self): cmd = 'get_users' url = self.base_url + cmd users = [] try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger("Status from Tautulli: " + str(response.status)) all_user_data = await response.json() for user in all_user_data['response']['data']: if user['username'] != 'Local': users.append(user['username']) self.tautulli_users = users logger(self.tautulli_users) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = "Can not load data from Tautulli: {} - {}".format(url, error) logger(msg, 40)
Get Tautulli users.
async def get_user_data(self): userdata = {} sessions = self.session_data.get('sessions', {}) try: async with async_timeout.timeout(8, loop=self._loop): for username in self.tautulli_users: userdata[username] = {} userdata[username]['Activity'] = None for session in sessions: if session['username'].lower() == username.lower(): userdata[username]['Activity'] = session['state'] for key in session: if key != 'Username': userdata[username][key] = session[key] break self.tautulli_user_data = userdata except (asyncio.TimeoutError, aiohttp.ClientError, KeyError): msg = "Can not load data from Tautulli." logger(msg, 40)
Get Tautulli userdata.
def import_string(impstr, attr=None): if "::" in impstr: impstr, attr = impstr.split("::") imported = wz_import_string(impstr) if attr is not None: return getobjpath(imported, attr) return imported
Imports a string. Can import an attribute of the imported class/module using a double colon as a separator
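For illustration, assuming `import_string()` from the snippet above is importable:

```python
# Import a module, then one of its attributes using the "::" separator.
os_path = import_string('os.path')
join = import_string('os.path::join')
assert join('a', 'b') == os_path.join('a', 'b')
```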
def getobjpath(obj, path): if not path: return obj if path.startswith("["): item = path[1:path.index("]")] return getobjpath(obj[item], path[len(item) + 2:]) if path.startswith("."): path = path[1:] if "." in path or "[" in path: dot_idx = path.find(".") bracket_idx = path.find("[") # Use the bracket only if there is one and it comes before the next dot. if dot_idx == -1 or (bracket_idx != -1 and bracket_idx < dot_idx): idx = bracket_idx next_idx = idx else: idx = dot_idx next_idx = idx + 1 attr = path[:idx] return getobjpath(getattr(obj, attr), path[next_idx:]) return getattr(obj, path)
Returns an item or attribute of the object recursively. Item names are specified between brackets, eg: [item]. Attribute names are prefixed with a dot (the first one is optional), eg: .attr Example: getobjpath(obj, "attr1.attr2[item].attr3")
def find_classes_in_module(module, clstypes): classes = [] for item in dir(module): item = getattr(module, item) try: for cls in clstypes: if issubclass(item, cls) and item != cls: classes.append(item) except Exception as e: pass return classes
Find the classes in *module* that are subclasses of any of the types in *clstypes*.
def remove_yaml_frontmatter(source, return_frontmatter=False): if source.startswith("---\n"): frontmatter_end = source.find("\n---\n", 4) if frontmatter_end == -1: frontmatter = source source = "" else: frontmatter = source[0:frontmatter_end] source = source[frontmatter_end + 5:] if return_frontmatter: return (source, frontmatter) return source if return_frontmatter: return (source, None) return source
If there's one, remove the YAML front-matter from the source
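A quick example, assuming `remove_yaml_frontmatter()` from the snippet above is in scope:

```python
source = "---\ntitle: Hello\n---\n# Body\n"
body, frontmatter = remove_yaml_frontmatter(source, return_frontmatter=True)
assert body == "# Body\n"
assert frontmatter == "---\ntitle: Hello"
```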
def populate_obj(obj, attrs): for k, v in attrs.iteritems(): setattr(obj, k, v)
Populates an object's attributes using the provided dict
def insert_element_to_dict_of_list(dict_of_list, key, parser): if key in dict_of_list.keys(): dict_of_list[key].append(parser) else: dict_of_list[key] = [parser]
Utility method :param dict_of_list: :param key: :param parser: :return:
def insert_element_to_dict_of_dicts_of_list(dict_of_dict_of_list, first_key, second_key, parser): list_to_insert = parser if isinstance(parser, list) else [parser] if first_key not in dict_of_dict_of_list.keys(): dict_of_dict_of_list[first_key] = {second_key: list_to_insert} else: if second_key not in dict_of_dict_of_list[first_key].keys(): dict_of_dict_of_list[first_key][second_key] = list_to_insert else: dict_of_dict_of_list[first_key][second_key] += list_to_insert
Utility method :param dict_of_dict_of_list: :param first_key: :param second_key: :param parser: :return:
def insert_element_to_dict_of_dicts(dict_of_dicts: Dict[str, Dict[str, str]], first_key: str, second_key: str, contents): if first_key not in dict_of_dicts.keys(): dict_of_dicts[first_key] = {second_key: contents} else: if second_key not in dict_of_dicts[first_key].keys(): dict_of_dicts[first_key][second_key] = contents else: warn('Overriding contents for ' + first_key + '/' + second_key) dict_of_dicts[first_key][second_key] = contents
Utility method :param dict_of_dicts: :param first_key: :param second_key: :param contents: :return:
def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_type: Type[T], logger: Logger = None) -> Parser: pass
Returns the most appropriate parser to use to parse object obj_on_filesystem as an object of type object_type :param obj_on_filesystem: the filesystem object to parse :param object_type: the type of object that the parser is expected to produce :param logger: :return:
def create(obj: PersistedObject, obj_type: Type[T], extensions_supported: Iterable[str]): # base message msg = "{obj} cannot be parsed as a {typ} because no parser supporting that extension ({ext}) is able to " \ "create this type of object." \ "".format(obj=obj, typ=get_pretty_type_str(obj_type), ext=obj.get_pretty_file_ext()) # add details if extensions_supported is not None and len(extensions_supported) > 0: msg += " If you wish to parse this fileobject to that precise type, you may wish to either " \ "(1) replace the file with any of the following extensions currently supported : {exts} " \ "(see get_capabilities_for_type({typ}, strict_type_matching=False) for details)." \ " Or (2) register a new parser." \ "".format(exts=extensions_supported, typ=get_pretty_type_str(obj_type)) else: raise ValueError('extensions_supported should be provided to create a NoParserFoundForObjectExt. If no ' 'extension is supported, use NoParserFoundForObjectType.create instead') e = NoParserFoundForObjectExt(msg) # save the extensions supported e.extensions_supported = extensions_supported return e
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param extensions_supported: :return:
def create(obj: PersistedObject, obj_type: Type[T], types_supported: Iterable[str]): # base message msg = str(obj) + ' cannot be parsed as a ' + get_pretty_type_str(obj_type) + ' because no parser supporting ' \ 'that type is registered for ' + obj.get_pretty_file_ext() + '.\n' # add details if types_supported is not None and len(types_supported) > 0: msg += ' If you wish to parse this object from this extension, you may wish to parse it as one of the ' \ 'following supported types : ' + str(types_supported) + '. \n' \ + 'Otherwise, please register a new parser for type ' + get_pretty_type_str(obj_type) \ + ' and extension ' + obj.get_pretty_file_ext() + '\n Reminder: use print_capabilities_by_ext()' \ + ' and print_capabilities_by_type() to diagnose what are the parsers available' else: raise ValueError('extensions_supported should be provided to create a NoParserFoundForObjectExt. If no ' 'extension is supported, use NoParserFoundForObjectType.create instead') e = NoParserFoundForObjectType(msg) # save the extensions supported e.types_supported = types_supported return e
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param types_supported: :return:
def create(obj: PersistedObject, obj_type: Type[T], errors: Dict[Type, Exception]): e = NoParserFoundForUnionType('{obj} cannot be parsed as a {typ} because no parser could be found for any of ' 'the alternate types. Caught exceptions: {errs}' ''.format(obj=obj, typ=get_pretty_type_str(obj_type), errs=errors)) # save the errors e.errors = errors return e
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param errors: a dictionary of the errors raised for each alternate type tried :return:
def register_parsers(self, parsers: List[Parser]): check_var(parsers, var_types=list, var_name='parsers') for parser in parsers: self.register_parser(parser)
Utility method to register any list of parsers. :return:
def print_capabilities_by_ext(self, strict_type_matching: bool = False): print('\nCapabilities by file extension: ') l = self.get_capabilities_by_ext(strict_type_matching=strict_type_matching) pprint({ext: get_pretty_type_keys_dict(parsers) for ext, parsers in l.items()}) print('\n')
Used to print the list of all file extensions that can be parsed by this parser registry. :return:
def print_capabilities_by_type(self, strict_type_matching: bool = False): print('\nCapabilities by object type: ') l = self.get_capabilities_by_type(strict_type_matching=strict_type_matching) pprint({get_pretty_type_str(typ): parsers for typ, parsers in l.items()}) print('\n')
Used to print the list of all object types that can be parsed by this parser registry. :return:
def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]: check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # List all types that can be parsed for typ in self.get_all_supported_types(): res[typ] = self.get_capabilities_for_type(typ, strict_type_matching) return res
For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return:
def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]: check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # For all extensions that are supported, for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching): res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching) return res
For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return:
def get_capabilities_for_ext(self, ext, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Parser]]: r = dict() # List all types that can be parsed from this extension. for typ in self.get_all_supported_types_for_ext(ext): # Use the query to fill matching = self.find_all_matching_parsers(strict_type_matching, desired_type=typ, required_ext=ext)[0] # matching_list = matching[0] + matching[1] + matching[2] # insert_element_to_dict_of_dicts_of_list(res, ext, typ, list(reversed(matching_list))) r[typ] = dict() exact = list(reversed(matching[2])) if len(exact) > 0: r[typ]['1_exact_match'] = exact approx = list(reversed(matching[1])) if len(approx) > 0: r[typ]['2_approx_match'] = approx generic = list(reversed(matching[0])) if len(generic) > 0: r[typ]['3_generic'] = generic # insert_element_to_dict_of_dicts(res, ext, typ, matching_dict) return r
Utility method to return, for a given file extension, all known ways to parse a file with this extension, organized by target object type. :param ext: :param strict_type_matching: :return:
def register_parser(self, parser: Parser): check_var(parser, var_types=Parser, var_name='parser') if (not parser.supports_multifile()) and (not parser.supports_singlefile()): # invalid raise _InvalidParserException.create(parser) # (0) sanity check : check that parser handles jokers properly res = parser.is_able_to_parse_detailed(desired_type=JOKER, desired_ext=JOKER, strict=True) if not (res[0] is True and res[1] is None): raise ValueError('Parser ' + str(parser) + ' can not be registered since it does not handle the JOKER cases ' 'correctly') # (1) store in the main lists if parser.is_generic(): self._generic_parsers.append(parser) else: self._specific_parsers.append(parser) # (2) simpler : simply store the ext <> type maps for ext in parser.supported_exts: for typ in parser.supported_types: insert_element_to_dict_of_list(self._strict_types_to_ext, typ, ext) insert_element_to_dict_of_list(self._ext_to_strict_types, ext, typ)
Utility method to register any parser. Parsers that support any type will be stored in the "generic" list, and the others will be stored in front of the types they support :return:
def get_all_parsers(self, strict_type_matching: bool = False) -> List[Parser]: matching = self.find_all_matching_parsers(strict=strict_type_matching)[0] # matching[1] (approx match) is supposed to be empty since we use a joker on type and a joker on ext : only # exact and generic match should exist, no approx match if len(matching[1]) > 0: raise Exception('Internal error - this matching[1] list is supposed to be empty for such a query') return matching[0] + matching[2]
Returns the list of all parsers in order of relevance. :return:
def get_all_supported_types_for_ext(self, ext_to_match: str, strict_type_matching: bool = False) -> Set[Type]: matching = self.find_all_matching_parsers(required_ext=ext_to_match, strict=strict_type_matching)[0] return {typ for types in [p.supported_types for p in (matching[0] + matching[1] + matching[2])] for typ in types}
Utility method to return the set of all supported types that may be parsed from files with the given extension. ext=JOKER is a joker that means all extensions :param ext_to_match: :param strict_type_matching: :return:
def get_all_supported_exts_for_type(self, type_to_match: Type[Any], strict: bool) -> Set[str]: matching = self.find_all_matching_parsers(desired_type=type_to_match, strict=strict)[0] return {ext for exts in [p.supported_exts for p in (matching[0] + matching[1] + matching[2])] for ext in exts}
Utility method to return the set of all supported file extensions that may be converted to objects of the given type. type=JOKER is a joker that means all types :param type_to_match: :param strict: :return:
def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, log_only_last: bool = False) -> ParsingPlan[T]: # find the parser for this object t, combined_parser = self.build_parser_for_fileobject_and_desiredtype(filesystem_object, desired_type, logger=logger) # ask the parser for the parsing plan return combined_parser.create_parsing_plan(t, filesystem_object, logger)
Implementation of Parser API Relies on the underlying registry of parsers to provide the best parsing plan :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return:
def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]): base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \ " - parsed value is : '{v}' of type <{tv}>\n" \ "".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att, tv=get_pretty_type_str(type(parsed_att))) msg = StringIO() if len(list(caught_exec.keys())) > 0: msg.writelines(' - converters tried are : \n * ') msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()])) msg.writelines(' \n Caught the following exceptions: \n') for converter, err in caught_exec.items(): msg.writelines('--------------- From ' + str(converter) + ' caught: \n') print_error_to_io_stream(err, msg) msg.write('\n') return AttrConversionException(base_msg + msg.getvalue())
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param att_name: :param parsed_att: :param attribute_type: :param caught_exec: :return:
def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None): if conversion_finder is None: msg = "No conversion finder provided to find a converter between parsed attribute '{patt}' of type " \ "'{typ}' and expected type '{expt}'.".format(patt=str(parsed_att), typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type)) else: msg = "No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type " \ "'{expt}' using conversion finder {conv}.".format(patt=parsed_att, typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type), conv=conversion_finder) if errors is not None: msg = msg + ' ' + str(errors) return NoConverterFoundForObjectType(msg)
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parsed_att: :param attribute_type: :param conversion_finder: :return:
def get_all_conversion_chains_to_type(self, to_type: Type[Any])\ -> Tuple[List[Converter], List[Converter], List[Converter]]: return self.get_all_conversion_chains(to_type=to_type)
Utility method to find all converters to a given type :param to_type: :return:
def get_all_conversion_chains_from_type(self, from_type: Type[Any]) \ -> Tuple[List[Converter], List[Converter], List[Converter]]: return self.get_all_conversion_chains(from_type=from_type)
Utility method to find all converters from a given type. :param from_type: :return:
def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\ -> Tuple[List[Converter], List[Converter], List[Converter]]: pass
Utility method to find all converters or conversion chains matching the provided query. :param from_type: a required type of input object, or JOKER for 'wildcard'(*) . WARNING: "from_type=AnyObject/object/Any" means "all converters able to source from anything", which is different from "from_type=JOKER" which means "all converters whatever their source type". :param to_type: a required type of output object, or JOKER for 'wildcard'(*) . WARNING: "to_type=AnyObject/object/Any" means "all converters able to produce any type of object", which is different from "to_type=JOKER" which means "all converters whatever type they are able to produce". :return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
def find_and_convert(self, attr_name: str, attr_value: S, desired_attr_type: Type[T], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: if robust_isinstance(attr_value, desired_attr_type) and not is_collection(desired_attr_type): # value is already of the correct type return attr_value else: # try to find conversion chains generic, approx, exact = self.get_all_conversion_chains(type(attr_value), desired_attr_type) all_chains = generic + approx + exact if len(all_chains) > 0: all_errors = dict() for chain in reversed(all_chains): try: return chain.convert(desired_attr_type, attr_value, logger, options) except Exception as e: all_errors[chain] = e raise AttrConversionException.create(attr_name, attr_value, desired_attr_type, all_errors) else: # did not find any conversion chain raise NoConverterFoundForObjectType.create(self, attr_value, desired_attr_type)
Utility method to convert some value into the desired type. It relies on get_all_conversion_chains to find the converters, and apply them in correct order :return:
def _try_convert_value(conversion_finder, attr_name: str, attr_value: S, desired_attr_type: Type[T], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: # check if we need additional conversion # (a) a collection with details about the internal item type if is_typed_collection(desired_attr_type): return ConversionFinder.convert_collection_values_according_to_pep(coll_to_convert=attr_value, desired_type=desired_attr_type, conversion_finder=conversion_finder, logger=logger, **options) # --- typing types do not work with isinstance so there is a special check here elif not robust_isinstance(attr_value, desired_attr_type): if conversion_finder is not None: return conversion_finder.find_and_convert(attr_name, attr_value, desired_attr_type, logger, options) else: raise NoConverterFoundForObjectType.create(conversion_finder, attr_value, desired_attr_type) else: # we can safely use the value: it is already of the correct type return attr_value
Utility method to try to use provided conversion_finder to convert attr_value into desired_attr_type. If no conversion is required, the conversion finder is not even used (it can be None) :param conversion_finder: :param attr_name: :param attr_value: :param desired_attr_type: :param logger: :param options: :return:
def register_converter(self, converter: Converter[S, T]): check_var(converter, var_types=Converter, var_name='converter') # (0) sanity check : check that parser handles jokers properly res = converter.is_able_to_convert_detailed(from_type=JOKER, to_type=JOKER, strict=True) if not (res[0] is True and res[1] is None and res[2] is None): raise ValueError('Converter ' + str(converter) + ' can not be registered since it does not handle the JOKER' ' cases correctly') # compute all possible chains and save them generic_chains, generic_nonstrict_chains, specific_chains, specific_nonstrict_chains \ = self._create_all_new_chains(converter) self._generic_nonstrict_conversion_chains += generic_nonstrict_chains self._generic_conversion_chains += generic_chains self._specific_non_strict_conversion_chains += specific_nonstrict_chains self._specific_conversion_chains += specific_chains # sort all lists by length self._generic_nonstrict_conversion_chains = sorted(self._generic_nonstrict_conversion_chains, key=len, reverse=True) self._generic_conversion_chains = sorted(self._generic_conversion_chains, key=len, reverse=True) self._specific_non_strict_conversion_chains = sorted(self._specific_non_strict_conversion_chains, key=len, reverse=True) self._specific_conversion_chains = sorted(self._specific_conversion_chains, key=len, reverse=True)
Utility method to register any converter. Converters that support any type will be stored in the "generic" lists, and the others will be stored in front of the types they support :return:
def match_version_pattern(filename, pattern): if "{VERSION}" not in pattern: raise ValueError("pattern does not contain a {VERSION} reference") pattern = pattern.replace('{VERSION}', '(?P<v>[\d\w\.\-_]+)') expr = re.compile(pattern) with open(filename) as fp: lines = fp.read().split('\n') for i, line in enumerate(lines): match = expr.search(line) if match: return Match(filename, lines, line_index=i, version=Version(match.group('v')), span=match.span('v')) return None
Matches a single version upgrade pattern in the specified *filename* and returns the match information. Returns a #Match object or #None if the *pattern* did not match.
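A hedged usage sketch; the `Match` fields referenced below are the ones constructed in the snippet above.

```python
# Look for a line such as: version = "1.4.2"
match = match_version_pattern('setup.py', 'version = "{VERSION}"')
if match is not None:
    print('found version', match.version, 'on line', match.line_index + 1)
```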
def get_changed_files(include_staged=False): process = subprocess.Popen(['git', 'status', '--porcelain'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, __ = process.communicate() if process.returncode != 0: raise ValueError(stdout) files = [] for line in stdout.decode().split('\n'): if not line or line.startswith('#'): continue assert line[2] == ' ' if not include_staged and line[1] == ' ': continue files.append(line[3:]) return files
Returns a list of the files that changed in the Git repository. This is used to check if the files that are supposed to be upgraded have changed. If so, the upgrade will be prevented.
def _correct_args(func, kwargs): args = inspect.getargspec(func)[0] return [kwargs[arg] for arg in args] + kwargs['__args']
Convert a dictionary of arguments including __args into a list for passing to the function.
def entrypoint(func): frame_local = sys._getframe(1).f_locals if '__name__' in frame_local and frame_local['__name__'] == '__main__': argv = sys.argv[1:] parser = signature_parser(func) try: kwargs = parser.parse_args(argv).__dict__ # special cli flags # --version is handled by ArgParse # if kwargs.get('version'): # print module_version(func) # return if 'version' in kwargs.keys(): del kwargs['version'] # --debug FORMAT = '%(asctime)-6s: %(name)s - %(levelname)s - %(message)s' if kwargs.get('debug'): logging.basicConfig( level=logging.DEBUG, format=FORMAT, ) del kwargs['debug'] if "__args" in kwargs: return func(*_correct_args(func, kwargs)) else: return func(**kwargs) except UsageError, e: parser.error(e.message) return func
A decorator for your main() function. Really a combination of @autorun and @acceptargv, so will run the function if __name__ == '__main__' with arguments extricated from argparse. As with @acceptargv, this must either be the innermost decorator, or separated only by "well-behaved" decorators that preserve the __doc__ attribute AND the function signature. As with @autorun, this must be the outermost decorator, as any decorators further out will not be applied to the function until after it is run.
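An illustrative sketch of applying the decorator; the exact CLI behaviour depends on `signature_parser()`, which is not shown above.

```python
@entrypoint
def main(name, count=1):
    """Print a greeting COUNT times."""
    for _ in range(count):
        print('Hello, %s!' % name)
```

When the module is executed directly, the parser built from `main()`'s signature and docstring handles the command-line arguments; when imported, `main()` stays a plain function.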
def autorun(func, _depth=1): frame_local = sys._getframe(_depth).f_locals if '__name__' in frame_local and frame_local['__name__'] == '__main__': func(argv=sys.argv[1:]) return func
Runs the function if the module in which it is declared is being run directly from the command line. Putting the following after the function definition would be similar: if __name__ == '__main__': func() NOTE: This works as expected only as the outermost decorator, as it will call the function before any decorators further out have been applied.
def acceptargv(func): parser = signature_parser(func) def main(*args, **kw): argv = kw.get('argv', None) if argv == None: return func(*args, **kw) else: try: kwargs = parser.parse_args(argv).__dict__ # special cli flags # --version is handled by ArgParse # if kwargs.get('version'): # print module_version(func) # return if 'version' in kwargs.keys(): del kwargs['version'] # --debug if kwargs.get('debug'): logging.basicConfig(level=logging.DEBUG) del kwargs['debug'] if "__args" in kwargs: return func(*_correct_args(func, kwargs)) else: return func(**kwargs) except UsageError, e: parser.error(e.message) main.__doc__ = func.__doc__ main.__name__ = func.__name__ main.__module__ = func.__module__ main.__dict__ = func.__dict__.copy() return main
Transforms the signature of the function, and its associated __doc__, into an argparse parser, then calls the function with the results of using said parser. The function returned takes an optional argument, which is the list of parameters; if they are not given, sys.argv[1:] is used instead. The function may raise a UsageError() if it wants to signal an error that the user has made with the parameters, this is done by @withuserfile for example. CAVEAT: this relies on the argument signature of the function, if that has been destroyed, perhaps by a badly behaved decorator, this won't work as expected. CAVEAT2: this destroys the argument signature of the function ;)
def quote(text): 'Handle quote characters' # Convert to unicode. if not isinstance(text, unicode): text = text.decode('utf-8') # Look for quote characters. Keep the text as is if it's already quoted. for qp in QUOTEPAIRS: if text[0] == qp[0] and text[-1] == qp[-1] and len(text) >= 2: return text # If it's not quoted, try quoting for qp in QUOTEPAIRS: if qp[1] not in text: return qp[0] + text + qp[1] #Darn raise ValueError(u'The value "%s" is not quoted and contains too many quote characters to quote' % text)
Handle quote characters
def register_opener(suffix, opener=None): if opener is None: def decorator(func): register_opener(suffix, func) return func return decorator if suffix in openers: raise ValueError('opener suffix {0!r} already registered'.format(suffix)) openers[suffix] = opener
Register a callback that opens an archive with the specified *suffix*. The object returned by the *opener* must implement the #tarfile.Tarfile interface, more specifically the following methods: - `add(filename, arcname) -> None` - `getnames() -> list of str` - `getmember(filename) -> TarInfo` - `extractfile(filename) -> file obj` This function can be used as a decorator when *opener* is not provided. The opener must accept the following arguments: %%arglist file (file-like): A file-like object to read the archive data from. mode (str): The mode to open the file in. Valid values are `'w'`, `'r'` and `'a'`. options (dict): A dictionary with possibly additional arguments.
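A sketch of the decorator form, assuming no opener is registered for the suffix yet and that `tarfile` is an acceptable backing implementation for the interface listed above.

```python
import tarfile

@register_opener('.tar.xz')
def open_tar_xz(file, mode, options):
    # tarfile.open() accepts a file object via fileobj and returns an object
    # providing add(), getnames(), getmember() and extractfile().
    return tarfile.open(fileobj=file, mode=mode + ':xz')
```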
def get_opener(filename): for suffix, opener in openers.items(): if filename.endswith(suffix): return suffix, opener raise UnknownArchive(filename)
Finds a matching opener that is registered with :func:`register_opener` and returns a tuple ``(suffix, opener)``. If there is no opener that can handle this filename, :class:`UnknownArchive` is raised.
def open(filename=None, file=None, mode='r', suffix=None, options=None): if mode not in ('r', 'w', 'a'): raise ValueError("invalid mode: {0!r}".format(mode)) if suffix is None: suffix, opener = get_opener(filename) if file is not None: filename = None # We don't need it anymore. else: if file is not None and filename is not None: raise ValueError("filename must not be set with file & suffix specified") try: opener = openers[suffix] except KeyError: raise UnknownArchive(suffix) if options is None: options = {} if file is not None: if mode in 'wa' and not hasattr(file, 'write'): raise TypeError("file.write() does not exist", file) if mode == 'r' and not hasattr(file, 'read'): raise TypeError("file.read() does not exist", file) if [filename, file].count(None) != 1: raise ValueError("either filename or file must be specified") if filename is not None: file = builtins.open(filename, mode + 'b') try: return opener(file, mode, options) except: if filename is not None: file.close() raise
Opens the archive at the specified *filename* or from the file-like object *file* using the appropriate opener. A specific opener can be specified by passing the *suffix* argument. # Parameters filename (str): A filename to open the archive from. file (file-like): A file-like object as source/destination. mode (str): The mode to open the archive in. suffix (str): Possible override for the *filename* suffix. Must be specified when *file* is passed instead of *filename*. options (dict): A dictionary that will be passed to the opener with which additional options can be specified. return (archive-like): An object that represents the archive and follows the interface of the #tarfile.TarFile class.
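Example usage of the `open()` helper above; the archive path is hypothetical and an opener for `.tar.gz` is assumed to be registered.

```python
archive = open('backups/data.tar.gz', mode='r')   # suffix resolved via get_opener()
try:
    for name in archive.getnames():
        print(name)
finally:
    archive.close()   # close() assumed via the tarfile.TarFile-like interface
```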
def transitions_to(self, dst): ''' returns enumerable of (prevstate, t) tuples this is super slow and needs to be sped up ''' if dst in self._transitions_to: for t in self._transitions_to[dst]: for s in self._transitions_to[dst][t]: yield (s, t)
returns enumerable of (prevstate, t) tuples this is super slow and needs to be sped up
def _add_epsilon_states(self, stateset, gathered_epsilons): ''' stateset is the list of initial states gathered_epsilons is a dictionary of (dst: src) epsilon dictionaries ''' for i in list(stateset): if i not in gathered_epsilons: gathered_epsilons[i] = {} q = _otq() q.append(i) while q: s = q.popleft() for j in self._transitions.setdefault(s, {}).setdefault(NFA.EPSILON, set()): gathered_epsilons[i][j] = s if j not in gathered_epsilons[i] else self.choose(s, j) q.append(j) stateset.update(gathered_epsilons[i].keys())
stateset is the list of initial states gathered_epsilons is a dictionary of (dst: src) epsilon dictionaries
def _add_training_data(self, src, dst, symbol): src_data = self.training_data[src] for (s, v) in src_data: if s == dst: v.append(symbol) return src_data.append((dst, [symbol]))
Training_data is a dictionary from strings to lists. - Each string (key) is an access string - Each list (value) is a list of tuples (target_state, [symbols directed to that state]). These represent that a transition exists from the state used as key to the first part of the training_data to the dst state which is the first part of the tuple with all the symbols in the list in the SECOND part of the tuple. Args: src (str): The source state dst (str): The target state symbol (str): The transition symbol Returns: None
def is_closed(self): old_training_data = self.training_data self.training_data = {x: [] for x in self.sm_vector} for t in self.smi_vector: src_state = t[:-1] symbol = t[-1:] found = False for dst_state in self.sm_vector: if self.observation_table[dst_state] == self.observation_table[t]: self._add_training_data(src_state, dst_state, symbol) found = True break if not found: return False, t assert self.training_data != old_training_data, \ "No update happened from previous round. The algo will loop infinetely" return True, None
Check if the observation table is closed. Args: None Returns: tuple (bool, str): True if the observation table is closed and false otherwise. If the table is not closed the escaping string is returned.
def _fill_table_entry(self, row, col): """ Fill an entry of the observation table. Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None """ self.observation_table[row, col] = self._membership_query(row + col)
Fill an entry of the observation table. Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None
def _run_in_hypothesis(self, mma, w_string, index): """ Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatenated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Return: str: The access string """ state = mma.states[0] s_index = 0 for i in range(index): for arc in state: if arc.guard.is_sat(w_string[i]): state = mma.states[arc.dst_state] s_index = arc.dst_state # The id of the state is its index inside the Sm list access_string = self.observation_table.sm_vector[s_index] logging.debug( 'Access string for %d: %s - %d ', index, access_string, s_index) return access_string
Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatenated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Returns: str: The access string
def _get_predicate_guards(self, state, state_training_data): # choose the sink transition. # First option: Just the maximum transition # sink = max(state_training_data, key=lambda x: len(x[1]))[0] # Second option: Heuristics based on RE filters properties max_size_trans = max(state_training_data, key=lambda x: len(x[1])) max_size_trans_l = [x for x in state_training_data if len(x[1]) == len(max_size_trans[1])] target_states = [t[0] for t in max_size_trans_l] if len(max_size_trans_l) == 1: sink = max_size_trans[0] elif '' in target_states: sink = '' elif state in target_states: sink = state else: sink = random.choice(target_states) # End of sink selection transitions = [] known_symbols = [] for (t, data) in state_training_data: if t == sink: continue pred = SetPredicate(data) transitions.append((t, pred)) known_symbols += data transitions.append( (sink, SetPredicate(set(self.alphabet) - set(known_symbols)))) return transitions
Compute the predicate guards for the outgoing transitions of a state. Args: state (DFA state): The DFA state state_training_data (list): The training data set Returns: list: A list of (target_state, predicate) transitions
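A toy illustration of the resulting guard partition, with plain sets standing in for SetPredicate and only the simple no-tie case of the sink heuristic (the alphabet, states, and symbols are made up):

alphabet = set('abcd')
# Symbols observed from 'q0': 'a' and 'b' lead to 'q1', 'c' leads to 'q2'.
state_training_data = [('q1', ['a', 'b']), ('q2', ['c'])]

# 'q1' has the most observed symbols, so it becomes the sink and also
# absorbs every symbol never observed during training ('d' here).
sink = max(state_training_data, key=lambda x: len(x[1]))[0]
guards, known = [], set()
for target, symbols in state_training_data:
    if target == sink:
        continue
    guards.append((target, set(symbols)))
    known |= set(symbols)
guards.append((sink, alphabet - known))
assert dict(guards) == {'q2': {'c'}, 'q1': {'a', 'b', 'd'}}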
def get_sfa_conjecture(self): sfa = SFA(self.alphabet) for s in self.observation_table.sm_vector: transitions = self._get_predicate_guards( s, self.observation_table.training_data[s]) for (t, pred) in transitions: src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(t) assert isinstance( pred, SetPredicate), "Invalid type for predicate {}".format(pred) sfa.add_arc(src_id, dst_id, pred) # Mark the final states in the hypothesis automaton. i = 0 for s in self.observation_table.sm_vector: sfa.states[i].final = self.observation_table[s, self.epsilon] i += 1 return sfa
Utilize the observation table to construct an SFA conjecture. Args: None Returns: SFA: An SFA built based on a closed and consistent observation table.
def _init_table(self): self.observation_table.sm_vector.append(self.epsilon) self.observation_table.smi_vector = [random.choice(self.alphabet)] self.observation_table.em_vector.append(self.epsilon) self._fill_table_entry(self.epsilon, self.epsilon) for s in self.observation_table.smi_vector: self._fill_table_entry(s, self.epsilon)
Initialize the observation table.
def _init_table_from_dfa(self, mma): observation_table_init = ObservationTableInit(self.epsilon, self.alphabet) sm_vector, smi_vector, em_vector = observation_table_init.initialize(mma, True) self.observation_table.sm_vector = sm_vector self.observation_table.smi_vector = smi_vector self.observation_table.em_vector = em_vector logging.info('Initialized from DFA em_vector table is the following:') logging.info(em_vector) self._fill_table_entry(self.epsilon, self.epsilon) # list(set([])) is used to remove duplicates, [1:] to remove epsilon for row in sorted(list(set(sm_vector + smi_vector)), key=len)[1:]: for column in em_vector: self._fill_table_entry(str(row), str(column))
Initializes the table from a DFA. Args: mma: The input automaton Returns: None
def learn_sfa(self, mma=None): logging.info('Initializing learning procedure.') if mma: self._init_table_from_dfa(mma) else: self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed while not closed: logging.debug('Checking if table is closed.') closed, s = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(s) else: logging.debug('Table closed.') # Create conjecture sfa = self.get_sfa_conjecture() logging.info('Generated conjecture machine with %d states.', len(list(sfa.states))) # Check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(sfa) # Are we done? if found: logging.info('No counterexample found. Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info( 'Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(sfa, counter_example) logging.info('Learning complete.') return '', sfa
Implements the high level loop of the algorithm for learning an SFA. Args: mma: An optional DFA used to initialize the observation table. Returns: tuple (str, SFA): An empty string and the learned SFA model.
def make_log_metric(level=logging.INFO, msg="%d items in %.2f seconds"): def log_metric(name, count, elapsed): log_name = 'instrument.{}'.format(name) if name else 'instrument' logging.getLogger(log_name).log(level, msg, count, elapsed) return log_metric
Make a new metric function that logs at the given level :arg int level: logging level, defaults to ``logging.INFO`` :arg string msg: logging message format string, taking ``count`` and ``elapsed`` :rtype: function
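A small usage example, assuming make_log_metric from above is in scope (the metric name and message are illustrative):

import logging
import time

logging.basicConfig(level=logging.DEBUG)
log_parse = make_log_metric(level=logging.DEBUG, msg="parsed %d rows in %.2f seconds")

start = time.time()
rows = list(range(1000))  # stand-in for real work
log_parse('csv_parse', len(rows), time.time() - start)
# logs on the 'instrument.csv_parse' logger, e.g.
# DEBUG:instrument.csv_parse:parsed 1000 rows in 0.00 seconds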
def _auto_init(self, *args, **kwrds): for fld in getattr(self, '__fields__', []): val = kwrds.get(fld.name, _NO_VAL) if val is _NO_VAL: val = fld.get_default_val() setattr(self, fld.name, val) if callable(getattr(self, 'setup', None)): self.setup(*args, **kwrds)
Our decorator will add this as __init__ to target classes.
def ctor_overridable(cls): prev_init = getattr(cls, "__init__", None) if not callable(prev_init): return True if prev_init in [object.__init__, _auto_init]: return True if getattr(prev_init, '_clobber_ok', False): return True print(cls, prev_init, getattr(prev_init, '_clobber_ok', 'missing')) return False
Return True if cls has an overridable __init__.
def get_default_val(self): val = self.default while callable(val): val = val() return val
Helper to expand default value (support callables).
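A minimal sketch of why the loop is a while rather than a single call: callable defaults are expanded until a plain value remains (the Field class below is a made-up stand-in for the real field type):

class Field(object):
    def __init__(self, default=None):
        self.default = default

    def get_default_val(self):
        # Expand callables (and callables returning callables) to a value.
        val = self.default
        while callable(val):
            val = val()
        return val

assert Field(default=42).get_default_val() == 42
assert Field(default=list).get_default_val() == []          # fresh list per instance
assert Field(default=lambda: dict).get_default_val() == {}  # chained callables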
def train(self, data, target, **kwargs): non_predictors = [i.replace(" ", "_").lower() for i in list(set(data['team']))] + ["team", "next_year_wins"] self.column_names = [l for l in list(data.columns) if l not in non_predictors] results, folds = self.cross_validate(data, non_predictors, **kwargs) self.gather_results(results, folds, data)
Used in the training phase. Override.
def compile(self, code): is_plain_text = True compiled_regex = r"" for chunk in self.delimiter_regex().split(code): if is_plain_text: compiled_regex = compiled_regex + simex_escape(chunk, flexible_whitespace=self._flexible_whitespace) else: stripped_chunk = chunk.strip() if stripped_chunk in self._regexes.keys(): compiled_regex = u"{0}{1}".format( compiled_regex, self._regexes[stripped_chunk] ) else: raise KeyNotFound("'{0}' not found in keys".format(stripped_chunk)) is_plain_text = not is_plain_text if self._exact: compiled_regex = r"^" + compiled_regex + r"$" return compile(compiled_regex)
Compile a simex code (e.g. <a href="{{ url }}">{{ anything }}</a>) to regex. Returns regex.
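A standalone sketch of the alternation trick the loop relies on: splitting on a capturing delimiter regex yields plain-text and template chunks in strict alternation (the delimiter pattern below is an assumption; the real one comes from delimiter_regex()):

import re

delimiter = re.compile(r"\{\{(.*?)\}\}")
chunks = delimiter.split('<a href="{{ url }}">{{ anything }}</a>')
# ['<a href="', ' url ', '">', ' anything ', '</a>']
assert chunks[0::2] == ['<a href="', '">', '</a>']               # plain text, to be escaped
assert [c.strip() for c in chunks[1::2]] == ['url', 'anything']  # keys looked up in the regex table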
def as_command(self): try: params = self.unbound_func.__click_params__ params.reverse() del self.unbound_func.__click_params__ except AttributeError: params = [] help = inspect.getdoc(self.real_func) if isinstance(help, bytes): help = help.decode('utf-8') self.options.setdefault('help', help) @pass_script_info_decorator def callback(info, *args, **kwargs): if self.with_reloader: app = info.load_app() if app.debug: def inner(): return self.command_callback(info, *args, **kwargs) run_with_reloader(inner, extra_files=get_reloader_extra_files()) return self.command_callback(info, *args, **kwargs) return self.cls(name=self.name, callback=callback, params=params, **self.options)
Creates the click command wrapping the function.
def priority(var): order = dict(JQUERY='0', BOOTSTRAP='1') return order.get(var, var)
Prioritizes resource position in the final HTML. To be fed into sorted(key=). Javascript consoles throw errors if Bootstrap's js file is mentioned before jQuery. Using this function such errors can be avoided. Used internally. Positional arguments: var -- value sent by list.sorted(), which is a value in Statics().all_variables. Returns: Either a number if sorting is enforced for the value in `var`, or returns `var` itself.
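For example (resource names other than JQUERY and BOOTSTRAP are illustrative):

resources = ['analytics.js', 'BOOTSTRAP', 'JQUERY']
# JQUERY maps to '0' and BOOTSTRAP to '1', so both sort before ordinary names.
assert sorted(resources, key=priority) == ['JQUERY', 'BOOTSTRAP', 'analytics.js']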