sentence1: stringlengths 52 to 3.87M
sentence2: stringlengths 1 to 47.2k
label: stringclasses, 1 value
def randtld(self): """ -> a random #str tld via :mod:tlds """ self.tlds = tuple(tlds.tlds) if not self.tlds else self.tlds return self.random.choice(self.tlds)
-> a random #str tld via :mod:tlds
entailment
def randurl(self): """ -> a random url-like #str via :prop:randdomain, :prop:randtld, and :prop:randpath """ return "{}://{}.{}/{}".format( self.random.choice(("http", "https")), self.randdomain, self.randtld, self.randpath)
-> a random url-like #str via :prop:randdomain, :prop:randtld, and :prop:randpath
entailment
def randtuple(self): """ -> a #tuple of random #int """ return tuple( self.randint for x in range(0, self.random.randint(3, 10)))
-> a #tuple of random #int
entailment
def randdeque(self): """ -> a :class:collections.deque of random #int """ return deque( self.randint for x in range(0, self.random.randint(3, 10)))
-> a :class:collections.deque of random #int
entailment
def randdict(self): """ -> a #dict of |{random_string: random_int}| """ return { self.randstr: self._map_type(int) for x in range(self.random.randint(3, 10))}
-> a #dict of |{random_string: random_int}|
entailment
def randset(self): """ -> a #set of random integers """ return { self._map_type(int) for x in range(self.random.randint(3, 10))}
-> a #set of random integers
entailment
def _to_tuple(self, _list): """ Recursively converts lists to tuples """ result = list() for l in _list: if isinstance(l, list): result.append(tuple(self._to_tuple(l))) else: result.append(l) return tuple(result)
Recursively converts lists to tuples
entailment
def dict(self, key_depth=1000, tree_depth=1): """ Creates a random #dict @key_depth: #int number of keys per @tree_depth to generate random values for @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| -> random #dict """ if not tree_depth: return self._map_type() return { self.randstr: self.dict(key_depth, tree_depth-1) for x in range(key_depth)}
Creates a random #dict @key_depth: #int number of keys per @tree_depth to generate random values for @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| -> random #dict
entailment
def defaultdict(self, key_depth=1000, tree_depth=1): """ Creates a random :class:collections.defaultdict @key_depth: #int number of keys per @tree_depth to generate random values for @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| -> random :class:collections.defaultdict """ if not tree_depth: return self._map_type() _dict = defaultdict() _dict.update({ self.randstr: self.defaultdict(key_depth, tree_depth-1) for x in range(key_depth)}) return _dict
Creates a random :class:collections.defaultdict @key_depth: #int number of keys per @tree_depth to generate random values for @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| -> random :class:collections.defaultdict
entailment
def tuple(self, size=1000, tree_depth=1): """ Creates a random #tuple @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| -> random #tuple """ if not tree_depth: return self._map_type() return tuple(self.tuple(size, tree_depth-1) for x in range(size))
Creates a random #tuple @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| -> random #tuple
entailment
def generator(self, size=1000, tree_depth=1): """ Creates a random #generator @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| -> random #generator """ if not tree_depth: return self._map_type() return (self.generator(size, tree_depth-1) for x in range(size))
Creates a random #generator @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| -> random #generator
entailment
def sequence(self, struct, size=1000, tree_depth=1, append_callable=None): """ Generates random values for sequence-like objects @struct: the sequence-like structure you want to fill with random data @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| @append_callable: #callable method which appends/adds data to your sequence-like structure - e.g. :meth:list.append -> random @struct .. from collections import UserList from vital.debug import RandData class MySequence(UserList): pass rd = RandData(int) my_seq = MySequence() rd.sequence(my_seq, 3, 1, my_seq.append) # -> [88508293836062443, 49097807561770961, 55043550817099444] .. """ if not tree_depth: return self._map_type() _struct = struct() add_struct = _struct.append if not append_callable \ else getattr(_struct, append_callable) for x in range(size): add_struct(self.sequence( struct, size, tree_depth-1, append_callable)) return _struct
Generates random values for sequence-like objects @struct: the sequence-like structure you want to fill with random data @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| @append_callable: #callable method which appends/adds data to your sequence-like structure - e.g. :meth:list.append -> random @struct .. from collections import UserList from vital.debug import RandData class MySequence(UserList): pass rd = RandData(int) my_seq = MySequence() rd.sequence(my_seq, 3, 1, my_seq.append) # -> [88508293836062443, 49097807561770961, 55043550817099444] ..
entailment
def mapping(self, struct, key_depth=1000, tree_depth=1, update_callable=None): """ Generates random values for dict-like objects @struct: the dict-like structure you want to fill with random data @key_depth: #int number of keys to generate random values for in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| @update_callable: #callable method which updates data in your dict-like structure - e.g. :meth:builtins.dict.update -> random @struct .. from collections import UserDict from vital.debug import RandData class MyDict(UserDict): pass rd = RandData(int) my_dict = MyDict() rd.mapping(my_dict, 3, 1, my_dict.update) # -> { # 'SE0ZNy0F6O': 42078648993195761, # 'pbK': 70822820981335987, # '0A5Aa7': 17503122029338459} .. """ if not tree_depth: return self._map_type() _struct = struct() add_struct = _struct.update if not update_callable \ else getattr(_struct, update_callable) for x in range(key_depth): add_struct({ self.randstr: self.mapping( struct, key_depth, tree_depth-1, update_callable) }) return _struct
Generates random values for dict-like objects @struct: the dict-like structure you want to fill with random data @key_depth: #int number of keys to generate random values for in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| @update_callable: #callable method which updates data in your dict-like structure - e.g. :meth:builtins.dict.update -> random @struct .. from collections import UserDict from vital.debug import RandData class MyDict(UserDict): pass rd = RandData(int) my_dict = MyDict() rd.mapping(my_dict, 3, 1, my_dict.update) # -> { # 'SE0ZNy0F6O': 42078648993195761, # 'pbK': 70822820981335987, # '0A5Aa7': 17503122029338459} ..
entailment
def _dict_prefix(self, key, value, i, dj=0, color=None, separator=":"): just = self._justify if i > 0 else dj key = cut(str(key), self._key_maxlen).rjust(just) key = colorize(key, color=color) pref = "{}{} {}".format(key, separator, value) """pref = "{}{} {}".format(colorize(str(key)[:self._key_maxlen]\ .rjust(just), color=color), separator, value)""" return pref
pref = "{}{} {}".format(colorize(str(key)[:self._key_maxlen]\ .rjust(just), color=color), separator, value)
entailment
def _format_numeric_sequence(self, _sequence, separator="."): """ Length of the highest index in chars = justification size """ if not _sequence: return colorize(_sequence, "purple") _sequence = _sequence if _sequence is not None else self.obj minus = (2 if self._depth > 0 else 0) just_size = len(str(len(_sequence))) out = [] add_out = out.append for i, item in enumerate(_sequence): self._incr_just_size(just_size+minus) add_out(self._numeric_prefix( i, self.pretty(item, display=False), just=just_size, color="blue", separator=separator)) self._decr_just_size(just_size+minus) if not self._depth: return padd("\n".join(out) if out else str(out), padding="top") else: return "\n".join(out) if out else str(out)
Length of the highest index in chars = justification size
entailment
def objname(self, obj=None): """ Formats object names in a pretty fashion """ obj = obj or self.obj _objname = self.pretty_objname(obj, color=None) _objname = "'{}'".format(colorize(_objname, "blue")) return _objname
Formats object names in a pretty fashion
entailment
def pretty(self, obj=None, display=True): """ Formats @obj or :prop:obj @obj: the object you'd like to prettify -> #str pretty object """ ret = self._format_obj(obj if obj is not None else self.obj) if display: print(ret) else: return ret
Formats @obj or :prop:obj @obj: the object you'd like to prettify -> #str pretty object
entailment
def _format_obj(self, item=None): """ Determines the type of the object and maps it to the correct formatter """ # Order here matters, odd behavior with tuples if item is None: return getattr(self, 'number')(item) elif isinstance(item, self.str_): #: String return item + " " elif isinstance(item, bytes): #: Bytes return getattr(self, 'bytes')(item) elif isinstance(item, self.numeric_): #: Float, int, etc. return getattr(self, 'number')(item) elif isinstance(item, self.dict_): #: Dict return getattr(self, 'dict')(item) elif isinstance(item, self.list_): #: List return getattr(self, 'list')(item) elif isinstance(item, tuple): #: Tuple return getattr(self, 'tuple')(item) elif isinstance(item, types.GeneratorType): #: Generator return getattr(self, 'generator')(item) elif isinstance(item, self.set_): #: Set return getattr(self, 'set')(item) elif isinstance(item, deque): #: Deque return getattr(self, 'deque')(item) elif isinstance(item, Sequence): #: Sequence return getattr(self, 'sequence')(item) #: Any other object return getattr(self, 'object')(item)
Determines the type of the object and maps it to the correct formatter
entailment
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"): """ Pretty prints object name @obj: the object whose name you want to pretty print @maxlen: #int maximum length of an object name to print @color: your choice of :mod:colors or |None| -> #str pretty object name .. from vital.debug import Look print(Look.pretty_objname(dict)) # -> 'dict\x1b[1;36m<builtins>\x1b[1;m' .. """ parent_name = lambda_sub("", get_parent_name(obj) or "") objname = get_obj_name(obj) if color: objname += colorize("<{}>".format(parent_name), color, close=False) else: objname += "<{}>".format(parent_name) objname = objname if len(objname) < maxlen else \ objname[:(maxlen-1)]+"…>" if color: objname += colors.RESET return objname
Pretty prints object name @obj: the object whose name you want to pretty print @maxlen: #int maximum length of an object name to print @color: your choice of :mod:colors or |None| -> #str pretty object name .. from vital.debug import Look print(Look.pretty_objname(dict)) # -> 'dict\x1b[1;36m<builtins>\x1b[1;m' ..
entailment
def set_level(self, level): """ Sets :attr:loglevel to @level @level: #str one or several :attr:levels """ if not level: return None self.levelmap = set() for char in level: self.levelmap = self.levelmap.union(self.levels[char]) self.loglevel = level return self.loglevel
Sets :attr:loglevel to @level @level: #str one or several :attr:levels
entailment
def log(self, flag_message=None, padding=None, color=None, force=False): """ Log Level: :attr:LOG @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").log("Hello") # (Hello) World logg("Hello world").log() # Hello world .. """ if self.should_log(self.LOG) or force: self._print_message( flag_message=flag_message, color=color or colors.bold, padding=padding)
Log Level: :attr:LOG @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").log("Hello") # (Hello) World logg("Hello world").log() # Hello world ..
entailment
def success(self, flag_message="Success", padding=None, force=False): """ Log Level: :attr:SUCCESS @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").success("Hello") # (Hello) World logg("Hello world").success() # (Success) Hello world .. """ if self.should_log(self.SUCCESS) or force: self._print_message( flag_message=flag_message, color=colors.success_color, padding=padding)
Log Level: :attr:SUCCESS @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").success("Hello") # (Hello) World logg("Hello world").success() # (Success) Hello world ..
entailment
def complete(self, flag_message="Complete", padding=None, force=False): """ Log Level: :attr:COMPLETE @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").complete("Hello") # (Hello) World logg("Hello world").complete() # (Complete) Hello world .. """ if self.should_log(self.COMPLETE) or force: self._print_message( flag_message=flag_message, color=colors.complete_color, padding=padding)
Log Level: :attr:COMPLETE @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").complete("Hello") # (Hello) World logg("Hello world").complete() # (Complete) Hello world ..
entailment
def notice(self, flag_message="Notice", padding=None, force=False): """ Log Level: :attr:NOTICE @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").notice("Hello") # (Hello) World logg("Hello world").notice() # (Notice) Hello world .. """ if self.should_log(self.NOTICE) or force: self._print_message( flag_message=flag_message, color=colors.notice_color, padding=padding)
Log Level: :attr:NOTICE @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").notice("Hello") # (Hello) World logg("Hello world").notice() # (Notice) Hello world ..
entailment
def warning(self, flag_message="Warning", padding=None, force=False): """ Log Level: :attr:WARNING @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").warning("Hello") # (Hello) World logg("Hello world").warning() # (Warning) Hello world .. """ if self.should_log(self.WARNING) or force: self._print_message( flag_message=flag_message, color=colors.warning_color, padding=padding)
Log Level: :attr:WARNING @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").warning("Hello") # (Hello) World logg("Hello world").warning() # (Warning) Hello world ..
entailment
def error(self, flag_message="Error", padding=None, force=False): """ Log Level: :attr:ERROR @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").error("Hello") # (Hello) World logg("Hello world").error() # (Error) Hello world .. """ if self.should_log(self.ERROR) or force: self._print_message( flag_message=flag_message, color=colors.error_color, padding=padding)
Log Level: :attr:ERROR @flag_message: #str flags the message with the given text using :func:flag @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @color: #str colorizes @flag_message using :func:colorize @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("World").error("Hello") # (Hello) World logg("Hello world").error() # (Error) Hello world ..
entailment
def timing(self, flag_message, padding=None, force=False): """ Log Level: :attr:TIMING @flag_message: time-like #float @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("Finished in").timing(0.908) # Finished in (908.0ms) logg().timing(0.908) # (908.0ms) .. """ if self.should_log(self.TIMING) or force: self._print_message( flag_message=Timer.format_time(flag_message), padding=padding, reverse=True, color=colors.timing_color)
Log Level: :attr:TIMING @flag_message: time-like #float @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("Finished in").timing(0.908) # Finished in (908.0ms) logg().timing(0.908) # (908.0ms) ..
entailment
def count(self, flag_message, padding=None, force=False): """ Log Level: :attr:COUNT @flag_message: #int, #float or a sized object to count @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("Total apps").count(3) # Total apps (3) logg().count([0, 1, 2, 3]) # (4) .. """ if self.should_log(self.COUNT) or force: flag_message = flag_message \ if isinstance(flag_message, (int, float)) else \ str(len(flag_message)) self._print_message( flag_message=flag_message, padding=padding, reverse=True, color=colors.timing_color)
Log Level: :attr:COUNT @flag_message: #int, #float or a sized object to count @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("Total apps").count(3) # Total apps (3) logg().count([0, 1, 2, 3]) # (4) ..
entailment
def format_message(self, message): """ Formats a message with :class:Look """ look = Look(message) return look.pretty(display=False)
Formats a message with :class:Look
entailment
def format_messages(self, messages): """ Formats several messages with :class:Look, encodes them with :func:vital.tools.encoding.stdout_encode """ mess = "" for message in messages: if self.pretty: mess = "{}{}".format(mess, self.format_message(message)) else: mess += str(message) if self.include_time: return ": {} : {}".format( datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), mess) return stdout_encode(mess)
Formats several messages with :class:Look, encodes them with :func:vital.tools.encoding.stdout_encode
entailment
def _print_message(self, flag_message=None, color=None, padding=None, reverse=False): """ Outputs the message to the terminal """ if flag_message: flag_message = stdout_encode(flag(flag_message, color=color if self.pretty else None, show=False)) if not reverse: print(padd(flag_message, padding), self.format_messages(self.message)) else: print(self.format_messages(self.message), padd(flag_message, padding)) else: print(self.format_messages(self.message)) self.message = []
Outputs the message to the terminal
entailment
def format_bar(self): """ Builds the progress bar """ pct = floor(round(self.progress/self.size, 2)*100) pr = floor(pct*.33) bar = "".join( ["‒" for x in range(pr)] + ["↦"] + [" " for o in range(self._barsize-pr-1)]) subprogress = self.format_parent_bar() if self.parent_bar else "" message = "Loading{} ={}{} ({}%)".format(subprogress, bar, "☉", pct) return message.ljust(len(message)+5)
Builds the progress bar
entailment
def finish(self): """ Resets the progress bar and clears it from the terminal """ pct = floor(round(self.progress/self.size, 2)*100) pr = floor(pct*.33) bar = "".join([" " for x in range(pr-1)] + ["↦"]) subprogress = self.format_parent_bar() if self.parent_bar else "" fin = "Loading{} ={}{} ({}%)".format(subprogress, bar, "ӿ", pct) print(fin.ljust(len(fin)+5), end="\r") time.sleep(0.10) print("\033[K\033[1A") self.progress = 0
Resets the progress bar and clears it from the terminal
entailment
def update(self, progress=0): """ Updates the progress bar with @progress if given, otherwise increments :prop:progress by 1. Also prints the progress bar. @progress: #int to assign to :prop:progress """ self.progress += (progress or 1) if self.visible: if self.progress % self._mod == 1 or\ self.progress == self.size - 1: print(self.format_bar(), end="\r") if self.progress == (self.size): self.finish()
Updates the progress bar with @progress if given, otherwise increments :prop:progress by 1. Also prints the progress bar. @progress: #int to assign to :prop:progress
entailment
def start(self): """ Starts the timer """ if not self._start: self._first_start = time.perf_counter() self._start = self._first_start else: self._start = time.perf_counter()
Starts the timer
entailment
def stop(self, precision=0): """ Stops the timer, adds it as an interval to :prop:intervals @precision: #int number of decimal places to round to -> #str formatted interval time """ self._stop = time.perf_counter() return self.add_interval(precision)
Stops the timer, adds it as an interval to :prop:intervals @precision: #int number of decimal places to round to -> #str formatted interval time
entailment
def format_time(self, sec): """ Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time """ # µsec if sec < 0.001: return "{}{}".format( colorize(round(sec*1000000, 2), "purple"), bold("µs")) # ms elif sec < 1.0: return "{}{}".format( colorize(round(sec*1000, 2), "purple"), bold("ms")) # s elif sec < 60.0: return "{}{}".format( colorize(round(sec, 2), "purple"), bold("s")) else: floored = floor(sec/60) return "{}{} {}{}".format( colorize(floored, "purple"), bold("m"), colorize(floor(sec-(floored*60)), "purple"), bold("s"))
Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time
entailment
def format_size(self, bytes): """ Pretty-formats given bytes size in a readable manner @bytes: #int or #float bytes -> #str formatted bytes """ # b if bytes < 1024: return "{}{}".format(colorize(round( bytes, 2), "purple"), bold("bytes")) # kb elif bytes < (1024*1024): return "{}{}".format(colorize(round( bytes/1024, 2), "purple"), bold("kB")) # mb elif bytes < (1024*1024*1024): return "{}{}".format(colorize(round( bytes/(1024*1024), 2), "purple"), bold("MB"))
Pretty-formats given bytes size in a readable manner @bytes: #int or #float bytes -> #str formatted bytes
entailment
def add_interval(self, precision=0): """ Adds an interval to :prop:intervals -> #str formatted time """ precision = precision or self.precision interval = round((self._stop - self._start), precision) self.intervals.append(interval) self._intervals_len += 1 self._start = time.perf_counter() return self.format_time(interval)
Adds an interval to :prop:intervals -> #str formatted time
entailment
def time(self, intervals=1, *args, _show_progress=True, _print=True, _collect_garbage=True, _quiet=True, **kwargs): """ Measures the execution time of :prop:_callable for @intervals @intervals: #int number of intervals to measure the execution time of the function for @*args: arguments to pass to the callable being timed @**kwargs: arguments to pass to the callable being timed @_show_progress: #bool whether or not to print a progress bar @_print: #bool whether or not to print the results of the timing @_collect_garbage: #bool whether or not to garbage collect while timing @_quiet: #bool whether or not to disable the print() function's ability to output to terminal during the timing -> :class:collections.OrderedDict of stats about the timing """ self.reset() args = list(args) + list(self._callableargs[0]) _kwargs = self._callableargs[1] _kwargs.update(kwargs) kwargs = _kwargs if not _collect_garbage: gc.disable() # Garbage collection setting gc.collect() self.allocated_memory = 0 for x in self.progress(intervals): if _quiet: # Quiets print()s in the tested function sys.stdout = NullIO() try: self.start() # Starts the timer self._callable(*args, **kwargs) self.stop() # Stops the timer except Exception as e: if _quiet: # Unquiets prints() sys.stdout = sys.__stdout__ raise e if _quiet: # Unquiets prints() sys.stdout = sys.__stdout__ if not _collect_garbage: gc.enable() # Garbage collection setting if _print: self.info()
Measures the execution time of :prop:_callable for @intervals @intervals: #int number of intervals to measure the execution time of the function for @*args: arguments to pass to the callable being timed @**kwargs: arguments to pass to the callable being timed @_show_progress: #bool whether or not to print a progress bar @_print: #bool whether or not to print the results of the timing @_collect_garbage: #bool whether or not to garbage collect while timing @_quiet: #bool whether or not to disable the print() function's ability to output to terminal during the timing -> :class:collections.OrderedDict of stats about the timing
entailment
def mean(self): """ -> #float :func:numpy.mean of the timing intervals """ return round(np.mean(self.array), self.precision)\ if len(self.array) else None
-> #float :func:numpy.mean of the timing intervals
entailment
def median(self): """ -> #float :func:numpy.median of the timing intervals """ return round(float(np.median(self.array)), self.precision)\ if len(self.array) else None
-> #float :func:numpy.median of the timing intervals
entailment
def max(self): """ -> #float :func:numpy.max of the timing intervals """ return round(np.max(self.array), self.precision)\ if len(self.array) else None
-> #float :func:numpy.max of the timing intervals
entailment
def min(self): """ -> #float :func:numpy.min of the timing intervals """ return round(np.min(self.array), self.precision)\ if len(self.array) else None
-> #float :func:numpy.min of the timing intervals
entailment
def stdev(self): """ -> #float :func:numpy.std of the timing intervals """ return round(np.std(self.array), self.precision)\ if len(self.array) else None
-> #float :func:numpy.std of the timing intervals
entailment
def stats(self): """ -> :class:collections.OrderedDict of stats about the time intervals """ return OrderedDict([ ("Intervals", len(self.array)), ("Mean", self.format_time(self.mean or 0)), ("Min", self.format_time(self.min or 0)), ("Median", self.format_time(self.median or 0)), ("Max", self.format_time(self.max or 0)), ("St. Dev.", self.format_time(self.stdev or 0)), ("Total", self.format_time(self.exectime or 0)), ])
-> :class:collections.OrderedDict of stats about the time intervals
entailment
def _pct_diff(self, best, other): """ Calculates and colorizes the percent difference between @best and @other """ return colorize("{}%".format( round(((best-other)/best)*100, 2)).rjust(10), "red")
Calculates and colorizes the percent difference between @best and @other
entailment
def info(self, verbose=None): """ Prints and formats the results of the timing @_print: #bool whether or not to print out to terminal @verbose: #bool True if you'd like to print the individual timing results in addition to the comparison results """ if self.name: flag(bold(self.name)) flag("Results after {} intervals".format( bold(self.num_intervals, close=False)), colors.notice_color, padding="top") line("‒") verbose = verbose if verbose is not None else self.verbose if verbose: for result in self._callable_results: result.info() line() diffs = [ (i, result.mean) for i, result in enumerate(self._callable_results) if result.mean] ranking = [ (i, self._callable_results[i].format_time(r)) for i, r in sorted(diffs, key=lambda x: x[1])] max_rlen = len(str(len(ranking)))+2 max_rlen2 = max(len(r) for i, r in ranking)+1 best = self._callable_results[ranking[0][0]].mean for idx, (i, rank) in enumerate(ranking, 1): _obj_name = Look(self._callables[i]).objname() pct = "".rjust(10) if idx == 1 else \ self._pct_diff(best, self._callable_results[i].mean) print( ("#"+str(idx)+" ¦").rjust(max_rlen), rank.rjust(max_rlen2), pct, "{}".format(_obj_name)) line("‒", padding="bottom")
Prints and formats the results of the timing @_print: #bool whether or not to print out to terminal @verbose: #bool True if you'd like to print the individual timing results in addition to the comparison results
entailment
def fi_iban_load_map(filename: str) -> dict: """ Loads Finnish monetary institution codes and BICs in CSV format. The map is keyed by the 3 digits following FIXX, as in FIXX<3 digits>. Can be used to map Finnish IBAN number to bank information. Format: dict('<3 digits>': (BIC, name), ...) :param filename: CSV file name of the BIC definitions. Columns: National ID, BIC Code, Institution Name """ out = {} with open(filename, 'rt') as fp: lines = [line.strip().split(',') for line in fp.readlines()] ver = lines.pop(0) head = lines.pop(0) if head != ['National ID', 'BIC Code', 'Financial Institution Name']: raise ValidationError('Incompatible file content in {}'.format(filename)) for line in lines: if len(line) == 3 and line[0]: nat_id = str(line[0]).strip() bic_code = line[1].strip() name = line[2].strip() out[nat_id] = (bic_code, name) return out
Loads Finnish monetary institution codes and BICs in CSV format. The map is keyed by the 3 digits following FIXX, as in FIXX<3 digits>. Can be used to map Finnish IBAN number to bank information. Format: dict('<3 digits>': (BIC, name), ...) :param filename: CSV file name of the BIC definitions. Columns: National ID, BIC Code, Institution Name
entailment
def async_lru(size=100): """ An LRU cache for asyncio coroutines in Python 3.5 .. @async_lru(1024) async def slow_coroutine(*args, **kwargs): return await some_other_slow_coroutine() .. """ cache = collections.OrderedDict() def decorator(fn): @wraps(fn) @asyncio.coroutine def memoizer(*args, **kwargs): key = str((args, kwargs)) try: result = cache.pop(key) cache[key] = result except KeyError: if len(cache) >= size: cache.popitem(last=False) result = cache[key] = yield from fn(*args, **kwargs) return result return memoizer return decorator
An LRU cache for asyncio coroutines in Python 3.5 .. @async_lru(1024) async def slow_coroutine(*args, **kwargs): return await some_other_slow_coroutine() ..
entailment
def choices_label(choices: tuple, value) -> str: """ Iterates a (value, label) list and returns the label matching the choice :param choices: [(choice1, label1), (choice2, label2), ...] :param value: Value to find :return: label, or '' if not found """ for key, label in choices: if key == value: return label return ''
Iterates a (value, label) list and returns the label matching the choice :param choices: [(choice1, label1), (choice2, label2), ...] :param value: Value to find :return: label, or '' if not found
entailment
def info(msg, *args, **kw): # type: (str, *Any, **Any) -> None """ Print a sys message to stdout. System messages should inform about the flow of the script. These should mark major milestones during the build. """ if len(args) or len(kw): msg = msg.format(*args, **kw) shell.cprint('-- <32>{}<0>'.format(msg))
Print a sys message to stdout. System messages should inform about the flow of the script. These should mark major milestones during the build.
entailment
def err(msg, *args, **kw): # type: (str, *Any, **Any) -> None """ Per-step status messages. Use this locally in a command definition to highlight more important information. """ if len(args) or len(kw): msg = msg.format(*args, **kw) shell.cprint('-- <31>{}<0>'.format(msg))
Per-step status messages. Use this locally in a command definition to highlight more important information.
entailment
def is_username(string, minlen=1, maxlen=15): """ Determines whether the @string pattern is username-like @string: #str being tested @minlen: minimum required username length @maxlen: maximum username length -> #bool """ if string: string = string.strip() return bool(username_re.match(string) and (minlen <= len(string) <= maxlen)) return False
Determines whether the @string pattern is username-like @string: #str being tested @minlen: minimum required username length @maxlen: maximum username length -> #bool
entailment
def bigint_to_string(val): """ Converts @val to a string if it is a big integer (|>2**53-1|) @val: #int or #float -> #str if @val is a big integer, otherwise @val """ if isinstance(val, _NUMBERS) and not abs(val) <= 2**53-1: return str(val) return val
Converts @val to a string if it is a big integer (|>2**53-1|) @val: #int or #float -> #str if @val is a big integer, otherwise @val
entailment
def rbigint_to_string(obj): """ Recursively converts big integers (|>2**53-1|) to strings @obj: Any python object -> @obj, with any big integers converted to #str objects """ if isinstance(obj, (str, bytes)) or not obj: # the input is the desired one, return as is return obj elif hasattr(obj, 'items'): # the input is a dict {} for k, item in obj.items(): obj[k] = rbigint_to_string(item) return obj elif hasattr(obj, '__iter__'): # the input is iterable is_tuple = isinstance(obj, tuple) if is_tuple: obj = list(obj) for i, item in enumerate(obj): obj[i] = rbigint_to_string(item) return obj if not is_tuple else tuple(obj) return bigint_to_string(obj)
Recursively converts big integers (|>2**53-1|) to strings @obj: Any python object -> @obj, with any big integers converted to #str objects
entailment
def remove_blank_lines(string): """ Removes all blank lines in @string -> #str without blank lines """ return "\n".join(line for line in string.split("\n") if len(line.strip()))
Removes all blank lines in @string -> #str without blank lines
entailment
def _manage_cmd(cmd, settings=None): # type: () -> None """ Run django ./manage.py command manually. This function eliminates the need for having ``manage.py`` (reduces file clutter). """ import sys from os import environ from peltak.core import conf from peltak.core import context from peltak.core import log sys.path.insert(0, conf.get('src_dir')) settings = settings or conf.get('django.settings', None) environ.setdefault("DJANGO_SETTINGS_MODULE", settings) args = sys.argv[0:-1] + cmd if context.get('pretend', False): log.info("Would run the following manage command:\n<90>{}", args) else: from django.core.management import execute_from_command_line execute_from_command_line(args)
Run django ./manage.py command manually. This function eliminates the need for having ``manage.py`` (reduces file clutter).
entailment
def current_branch(): # type: () -> BranchDetails """ Return the BranchDetails for the current branch. Return: BranchDetails: The details of the current branch. """ cmd = 'git symbolic-ref --short HEAD' branch_name = shell.run( cmd, capture=True, never_pretend=True ).stdout.strip() return BranchDetails.parse(branch_name)
Return the BranchDetails for the current branch. Return: BranchDetails: The details of the current branch.
entailment
def commit_branches(sha1): # type: (str) -> List[str] """ Get the name of the branches that this commit belongs to. """ cmd = 'git branch --contains {}'.format(sha1) return shell.run( cmd, capture=True, never_pretend=True ).stdout.strip().split()
Get the name of the branches that this commit belongs to.
entailment
def guess_base_branch(): # type: () -> Optional[str] """ Try to guess the base branch for the current branch. Do not trust this guess. git makes it pretty much impossible to guess the base branch reliably so this function implements a few heuristics that will work on most common use cases but anything a bit crazy will probably trip this function. Returns: Optional[str]: The name of the base branch for the current branch if guessable or **None** if can't guess. """ my_branch = current_branch(refresh=True).name curr = latest_commit() if len(curr.branches) > 1: # We're possibly at the beginning of the new branch (currently both # on base and new branch). other = [x for x in curr.branches if x != my_branch] if len(other) == 1: return other[0] return None else: # We're on one branch parent = curr while parent and my_branch in parent.branches: curr = parent if len(curr.branches) > 1: other = [x for x in curr.branches if x != my_branch] if len(other) == 1: return other[0] return None parents = [p for p in curr.parents if my_branch in p.branches] num_parents = len(parents) if num_parents > 2: # More than two parents, give up return None if num_parents == 2: # This is a merge commit. for p in parents: if p.branches == [my_branch]: parent = p break elif num_parents == 1: parent = parents[0] elif num_parents == 0: parent = None return None
Try to guess the base branch for the current branch. Do not trust this guess. git makes it pretty much impossible to guess the base branch reliably so this function implements a few heuristics that will work on most common use cases but anything a bit crazy will probably trip this function. Returns: Optional[str]: The name of the base branch for the current branch if guessable or **None** if can't guess.
entailment
def commit_author(sha1=''): # type: (str) -> Author """ Return the author of the given commit. Args: sha1 (str): The sha1 of the commit to query. If not given, it will return the sha1 for the current commit. Returns: Author: A named tuple ``(name, email)`` with the commit author details. """ with conf.within_proj_dir(): cmd = 'git show -s --format="%an||%ae" {}'.format(sha1) result = shell.run( cmd, capture=True, never_pretend=True ).stdout name, email = result.split('||') return Author(name, email)
Return the author of the given commit. Args: sha1 (str): The sha1 of the commit to query. If not given, it will return the sha1 for the current commit. Returns: Author: A named tuple ``(name, email)`` with the commit author details.
entailment
def unstaged(): # type: () -> List[str] """ Return a list of unstaged files in the project repository. Returns: list[str]: The list of files with unstaged changes in the project git repo. """ with conf.within_proj_dir(): status = shell.run( 'git status --porcelain', capture=True, never_pretend=True ).stdout results = [] for file_status in status.split(os.linesep): if file_status.strip() and file_status[0] == ' ': results.append(file_status[3:].strip()) return results
Return a list of unstaged files in the project repository. Returns: list[str]: The list of files with unstaged changes in the project git repo.
entailment
def ignore(): # type: () -> List[str] """ Return a list of patterns in the project .gitignore Returns: list[str]: List of patterns set to be ignored by git. """ def parse_line(line): # pylint: disable=missing-docstring # Decode if necessary if not isinstance(line, string_types): line = line.decode('utf-8') # Strip comment line = line.split('#', 1)[0].strip() return line ignore_files = [ conf.proj_path('.gitignore'), conf.proj_path('.git/info/exclude'), config().get('core.excludesfile') ] result = [] for ignore_file in ignore_files: if not (ignore_file and os.path.exists(ignore_file)): continue with open(ignore_file) as fp: parsed = (parse_line(l) for l in fp.readlines()) result += [x for x in parsed if x] return result
Return a list of patterns in the project .gitignore Returns: list[str]: List of patterns set to be ignored by git.
entailment
def branches(): # type: () -> List[str] """ Return a list of branches in the current repo. Returns: list[str]: A list of branches in the current repo. """ out = shell.run( 'git branch', capture=True, never_pretend=True ).stdout.strip() return [x.strip('* \t\n') for x in out.splitlines()]
Return a list of branches in the current repo. Returns: list[str]: A list of branches in the current repo.
entailment
def tag(name, message, author=None): # type: (str, str, Author, bool) -> None """ Tag the current commit. Args: name (str): The tag name. message (str): The tag message. Same as ``-m`` parameter in ``git tag``. author (Author): The commit author. Will default to the author of the commit. pretend (bool): If set to **True** it will print the full ``git tag`` command instead of actually executing it. """ cmd = ( 'git -c "user.name={author.name}" -c "user.email={author.email}" ' 'tag -a "{name}" -m "{message}"' ).format( author=author or latest_commit().author, name=name, message=message.replace('"', '\\"').replace('`', '\\`'), ) shell.run(cmd)
Tag the current commit. Args: name (str): The tag name. message (str): The tag message. Same as ``-m`` parameter in ``git tag``. author (Author): The commit author. Will default to the author of the commit. pretend (bool): If set to **True** it will print the full ``git tag`` command instead of actually executing it.
entailment
def config(): # type: () -> dict[str, Any] """ Return the current git configuration. Returns: dict[str, Any]: The current git config taken from ``git config --list``. """ out = shell.run( 'git config --list', capture=True, never_pretend=True ).stdout.strip() result = {} for line in out.splitlines(): name, value = line.split('=', 1) result[name.strip()] = value.strip() return result
Return the current git configuration. Returns: dict[str, Any]: The current git config taken from ``git config --list``.
entailment
def tags(): # type: () -> List[str] """ Returns all tags in the repo. Returns: list[str]: List of all tags in the repo, sorted as versions. All tags returned by this function will be parsed as if they contained versions (using ``v:refname`` sorting). """ return shell.run( 'git tag --sort=v:refname', capture=True, never_pretend=True ).stdout.strip().splitlines()
Returns all tags in the repo. Returns: list[str]: List of all tags in the repo, sorted as versions. All tags returned by this function will be parsed as if they contained versions (using ``v:refname`` sorting).
entailment
def verify_branch(branch_name): # type: (str) -> bool """ Verify if the given branch exists. Args: branch_name (str): The name of the branch to check. Returns: bool: **True** if a branch with name *branch_name* exists, **False** otherwise. """ try: shell.run( 'git rev-parse --verify {}'.format(branch_name), never_pretend=True ) return True except IOError: return False
Verify if the given branch exists. Args: branch_name (str): The name of the branch to check. Returns: bool: **True** if a branch with name *branch_name* exists, **False** otherwise.
entailment
def protected_branches(): # type: () -> list[str] """ Return branches protected from deletion. By default those are master and devel branches as configured in pelconf. Returns: list[str]: Names of important branches that should not be deleted. """ master = conf.get('git.master_branch', 'master') develop = conf.get('git.devel_branch', 'develop') return conf.get('git.protected_branches', (master, develop))
Return branches protected from deletion. By default those are master and devel branches as configured in pelconf. Returns: list[str]: Names of important branches that should not be deleted.
entailment
def branches(self): # type: () -> List[str] """ List of all branches this commit is a part of. """ if self._branches is None: cmd = 'git branch --contains {}'.format(self.sha1) out = shell.run( cmd, capture=True, never_pretend=True ).stdout.strip() self._branches = [x.strip('* \t\n') for x in out.splitlines()] return self._branches
List of all branches this commit is a part of.
entailment
def parents(self): # type: () -> List[CommitDetails] """ Parents of this commit. """ if self._parents is None: self._parents = [CommitDetails.get(x) for x in self.parents_sha1] return self._parents
Parents of this commit.
entailment
def number(self): # type: () -> int """ Return this commit's number. This is the same as the total number of commits in history up until this commit. This value can be useful in some CI scenarios as it allows tracking progress on any given branch (although there can be two commits with the same number existing on different branches). Returns: int: The commit number/index. """ cmd = 'git log --oneline {}'.format(self.sha1) out = shell.run(cmd, capture=True, never_pretend=True).stdout.strip() return len(out.splitlines())
Return this commit's number. This is the same as the total number of commits in history up until this commit. This value can be useful in some CI scenarios as it allows tracking progress on any given branch (although there can be two commits with the same number existing on different branches). Returns: int: The commit number/index.
entailment
def get(cls, sha1=''): # type: (str) -> CommitDetails """ Return details about a given commit. Args: sha1 (str): The sha1 of the commit to query. If not given, it will return the details for the latest commit. Returns: CommitDetails: Commit details. You can use the instance of the class to query git tree further. """ with conf.within_proj_dir(): cmd = 'git show -s --format="%H||%an||%ae||%s||%b||%P" {}'.format( sha1 ) result = shell.run(cmd, capture=True, never_pretend=True).stdout sha1, name, email, title, desc, parents = result.split('||') return CommitDetails( sha1=sha1, author=Author(name, email), title=title, desc=desc, parents_sha1=parents.split(), )
Return details about a given commit. Args: sha1 (str): The sha1 of the commit to query. If not given, it will return the details for the latest commit. Returns: CommitDetails: Commit details. You can use the instance of the class to query git tree further.
entailment
def main(): """Sample usage for this python module This main method simply illustrates sample usage for this python module. :return: None """ log = logging.getLogger(Logify.get_name() + '.logify.main') log.info('logger name is: %s', Logify.get_name()) log.debug('This is DEBUG') log.info('This is INFO') log.warning('This is a WARNING') log.error('This is an ERROR')
Sample usage for this python module This main method simply illustrates sample usage for this python module. :return: None
entailment
def set_log_level(cls, log_level): """Sets the log level for cons3rt assets This method sets the logging level for cons3rt assets using pycons3rt. The loglevel is read in from a deployment property called loglevel and set appropriately. :type log_level: str :return: True if log level was set, False otherwise. """ log = logging.getLogger(cls.cls_logger + '.set_log_level') log.info('Attempting to set the log level...') if log_level is None: log.info('Arg loglevel was None, log level will not be updated.') return False if not isinstance(log_level, basestring): log.error('Passed arg loglevel must be a string') return False log_level = log_level.upper() log.info('Attempting to set log level to: %s...', log_level) if log_level == 'DEBUG': cls._logger.setLevel(logging.DEBUG) elif log_level == 'INFO': cls._logger.setLevel(logging.INFO) elif log_level == 'WARN': cls._logger.setLevel(logging.WARN) elif log_level == 'WARNING': cls._logger.setLevel(logging.WARN) elif log_level == 'ERROR': cls._logger.setLevel(logging.ERROR) else: log.error('Could not set log level, this is not a valid log level: %s', log_level) return False log.info('pycons3rt loglevel set to: %s', log_level) return True
Sets the log level for cons3rt assets This method sets the logging level for cons3rt assets using pycons3rt. The loglevel is read in from a deployment property called loglevel and set appropriately. :type log_level: str :return: True if log level was set, False otherwise.
entailment
def get_record_by_name(self, index, name): """ Searches for a single document in the given index on the 'name' field. Performs a case-insensitive search by utilizing Elasticsearch's `match_phrase` query. Args: index: `str`. The name of an Elasticsearch index (i.e. biosamples). name: `str`. The value of a document's name key to search for. Returns: `dict` containing the document that was indexed into Elasticsearch. Raises: `MultipleHitsException`: More than 1 hit is returned. """ result = self.ES.search( index=index, body={ "query": { "match_phrase": { "name": name, } } } ) hits = result["hits"]["hits"] if not hits: return {} elif len(hits) == 1: return hits[0]["_source"] else: # Multiple records found with the same prefix. See if a single record's name attr matches # the match phrase exactly (in a lower-case comparison). for h in hits: source = h["_source"] record_name = source["name"] if record_name.lower().strip() == name.lower().strip(): return source msg = "match_phrase search found multiple records matching query '{}' for index '{}'.".format(name, index) raise MultipleHitsException(msg)
Searches for a single document in the given index on the 'name' field. Performs a case-insensitive search by utilizing Elasticsearch's `match_phrase` query. Args: index: `str`. The name of an Elasticsearch index (i.e. biosamples). name: `str`. The value of a document's name key to search for. Returns: `dict` containing the document that was indexed into Elasticsearch. Raises: `MultipleHitsException`: More than 1 hit is returned.
entailment
def getattr_in(obj, name): """ Finds an attribute in @obj via a period-delimited string @name. @obj: (#object) @name: (#str) |.|-separated keys to search @obj in .. obj.deep.attr = 'deep value' getattr_in(obj, 'deep.attr') .. |'deep value'| """ for part in name.split('.'): obj = getattr(obj, part) return obj
Finds an attribute in @obj via a period-delimited string @name. @obj: (#object) @name: (#str) |.|-separated keys to search @obj in .. obj.deep.attr = 'deep value' getattr_in(obj, 'deep.attr') .. |'deep value'|
entailment
def import_from(name): """ Imports a module, class or method from string and unwraps it if wrapped by functools @name: (#str) name of the python object -> imported object """ obj = name if isinstance(name, str) and len(name): try: obj = locate(name) assert obj is not None except (AttributeError, TypeError, AssertionError, ErrorDuringImport): try: name = name.split(".") attr = name[-1] name = ".".join(name[:-1]) mod = importlib.import_module(name) obj = getattr(mod, attr) except (SyntaxError, AttributeError, ImportError, ValueError): try: name = name.split(".") attr_sup = name[-1] name = ".".join(name[:-1]) mod = importlib.import_module(name) obj = getattr(getattr(mod, attr_sup), attr) except: # We give up. pass obj = unwrap_obj(obj) return obj
Imports a module, class or method from string and unwraps it if wrapped by functools @name: (#str) name of the python object -> imported object
entailment
def unwrap_obj(obj): """ Gets the actual object from a decorated or wrapped function @obj: (#object) the object to unwrap """ try: obj = obj.fget except (AttributeError, TypeError): pass try: # Cached properties if obj.func.__doc__ == obj.__doc__: obj = obj.func except AttributeError: pass try: # Setter/Getters obj = obj.getter except AttributeError: pass try: # Wrapped Funcs obj = inspect.unwrap(obj) except: pass return obj
Gets the actual object from a decorated or wrapped function @obj: (#object) the object to unwrap
entailment
def add_modality(output_path, modality): """Modality can be appended to the file name (such as 'bold') or used in the folder (such as "func"). You should always use the specific modality ('bold'). This function converts it to the folder name. """ if modality is None: return output_path else: if modality in ('T1w', 'T2star', 'FLAIR', 'PD'): modality = 'anat' elif modality == 'bold': modality = 'func' elif modality == 'epi': # topup modality = 'fmap' elif modality in ('electrodes', 'coordsystem', 'channels'): modality = 'ieeg' elif modality == 'events': raise ValueError('modality "events" is ambiguous (can be in folder "ieeg" or "func"). Assuming "ieeg"') return output_path / modality
Modality can be appended to the file name (such as 'bold') or used in the folder (such as "func"). You should always use the specific modality ('bold'). This function converts it to the folder name.
entailment
def load(): # type: () -> None """ Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML. """ with within_proj_dir(): if os.path.exists('pelconf.yaml'): load_yaml_config('pelconf.yaml') if os.path.exists('pelconf.py'): load_py_config('pelconf.py')
Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML.
entailment
def load_yaml_config(conf_file): # type: (str) -> None """ Load a YAML configuration. This will not update the configuration but replace it entirely. Args: conf_file (str): Path to the YAML config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid YAML file. """ global g_config with open(conf_file) as fp: # Initialize config g_config = util.yaml_load(fp) # Add src_dir to sys.paths if it's set. This is only done with YAML # configs, py configs have to do this manually. src_dir = get_path('src_dir', None) if src_dir is not None: sys.path.insert(0, src_dir) for cmd in get('commands', []): _import(cmd)
Load a YAML configuration. This will not update the configuration but replace it entirely. Args: conf_file (str): Path to the YAML config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid YAML file.
entailment
def load_py_config(conf_file):
    # type: (str) -> None
    """ Import configuration from a python file.

    This will just import the file into python. Sky is the limit. The file
    has to deal with the configuration all by itself (i.e. call conf.init()).
    You will also need to add your src directory to sys.path if it's not the
    current working directory. This is done automatically if you use yaml
    config as well.

    Args:
        conf_file (str):
            Path to the py module config. This function will not check the
            file name or extension and will just crash if the given file does
            not exist or is not a valid python file.
    """
    if sys.version_info >= (3, 5):
        from importlib import util

        spec = util.spec_from_file_location('pelconf', conf_file)
        mod = util.module_from_spec(spec)
        spec.loader.exec_module(mod)

    elif sys.version_info >= (3, 3):
        from importlib import machinery

        loader = machinery.SourceFileLoader('pelconf', conf_file)
        _ = loader.load_module()

    elif sys.version_info < (3, 0):  # Python 2
        import imp

        imp.load_source('pelconf', conf_file)
Import configuration from a python file. This will just import the file into python. Sky is the limit. The file has to deal with the configuration all by itself (i.e. call conf.init()). You will also need to add your src directory to sys.path if it's not the current working directory. This is done automatically if you use yaml config as well. Args: conf_file (str): Path to the py module config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid python file.
entailment
def load_template(filename): # type: (str) -> str """ Load template from file. The templates are part of the package and must be included as ``package_data`` in project ``setup.py``. Args: filename (str): The template path. Relative to `peltak` package directory. Returns: str: The content of the chosen template. """ template_file = os.path.join(PKG_DIR, 'templates', filename) with open(template_file) as fp: return fp.read()
Load template from file. The templates are part of the package and must be included as ``package_data`` in project ``setup.py``. Args: filename (str): The template path. Relative to `peltak` package directory. Returns: str: The content of the chosen template.
entailment
def proj_path(*path_parts):
    # type: (str) -> str
    """ Return absolute path to the repo dir (root project directory).

    Args:
        *path_parts (str):
            Path components relative to the project root (the directory
            containing pelconf.yaml).

    Returns:
        str: The given path converted to an absolute path.
    """
    path_parts = path_parts or ['.']

    # If path represented by path_parts is absolute, do not modify it.
    if not os.path.isabs(path_parts[0]):
        proj_path = _find_proj_root()

        if proj_path is not None:
            path_parts = [proj_path] + list(path_parts)

    return os.path.normpath(os.path.join(*path_parts))
Return absolute path to the repo dir (root project directory). Args: *path_parts (str): Path components relative to the project root (the directory containing pelconf.yaml). Returns: str: The given path converted to an absolute path.
entailment
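A hedged usage sketch for proj_path above; the project layout is hypothetical and assumes the directory containing pelconf.yaml is /home/user/proj.

    proj_path()                  # '/home/user/proj'
    proj_path('src', 'mypkg')    # '/home/user/proj/src/mypkg'
    proj_path('/tmp/absolute')   # absolute inputs are returned unchanged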
def within_proj_dir(path='.'):
    # type: (Optional[str]) -> Iterator[None]
    """ Temporarily switch the working directory to the given project relative path.

    This is a generator intended to be used as a context manager (e.g. wrapped
    with ``contextlib.contextmanager``).

    :param path:
        Project relative path that the working directory will be changed to
        for the duration of the ``with`` block.
    :return:
        Yields with the working directory changed; the original directory is
        restored afterwards.
    """
    curr_dir = os.getcwd()
    os.chdir(proj_path(path))

    try:
        yield
    finally:
        # Restore the original working directory even if the block raises.
        os.chdir(curr_dir)
Temporarily switch the working directory to the given project relative path. This is a generator intended to be used as a context manager (e.g. wrapped with ``contextlib.contextmanager``). :param path: Project relative path that the working directory will be changed to for the duration of the ``with`` block. :return: Yields with the working directory changed; the original directory is restored afterwards.
entailment
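A minimal sketch of using within_proj_dir above as a context manager, assuming it is wrapped with contextlib.contextmanager as its yield-based body suggests.

    import os

    with within_proj_dir('docs'):
        # The working directory is now <project root>/docs.
        entries = os.listdir('.')
    # The original working directory is restored here.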
def get(name, *default):
    # type: (str, Any) -> Any
    """ Get config value with the given name and optional default.

    Args:
        name (str):
            The name of the config value.
        *default (Any):
            If given and the key does not exist, this will be returned
            instead. If it's not given and the config value does not exist,
            AttributeError will be raised.

    Returns:
        The requested config value. This is one of the global values defined
        in this file. If the value does not exist it will return `default` if
        given or raise `AttributeError`.

    Raises:
        AttributeError: If the value does not exist and `default` was not given.
    """
    global g_config

    curr = g_config

    for part in name.split('.'):
        if part in curr:
            curr = curr[part]
        elif default:
            return default[0]
        else:
            raise AttributeError("Config value '{}' does not exist".format(
                name
            ))

    return curr
Get config value with the given name and optional default. Args: name (str): The name of the config value. *default (Any): If given and the key does not exist, this will be returned instead. If it's not given and the config value does not exist, AttributeError will be raised. Returns: The requested config value. This is one of the global values defined in this file. If the value does not exist it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given.
entailment
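A hedged sketch of dotted-key lookups with get() above; the configuration values shown are hypothetical.

    # Assuming the loaded config is {'build': {'out_dir': 'dist'}}:
    get('build.out_dir')              # 'dist'
    get('build.missing', 'fallback')  # 'fallback' (the default is returned)
    get('no.such.key')                # raises AttributeError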
def get_path(name, *default):
    # type: (str, Any) -> Any
    """ Get config value as a path relative to the project directory.

    This allows easily defining the project configuration within the fabfile
    as always relative to that fabfile.

    Args:
        name (str):
            The name of the config value containing the path.
        *default (Any):
            If given and the key does not exist, this will be returned
            instead. If it's not given and the config value does not exist,
            AttributeError will be raised.

    Returns:
        The requested config value. This is one of the global values defined
        in this file. If the value does not exist it will return `default` if
        given or raise `AttributeError`.

    Raises:
        AttributeError: If the value does not exist and `default` was not given.
    """
    global g_config

    value = get(name, *default)

    if value is None:
        return None

    return proj_path(value)
Get config value as a path relative to the project directory. This allows easily defining the project configuration within the fabfile as always relative to that fabfile. Args: name (str): The name of the config value containing the path. *default (Any): If given and the key does not exist, this will be returned instead. If it's not given and the config value does not exist, AttributeError will be raised. Returns: The requested config value. This is one of the global values defined in this file. If the value does not exist it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given.
entailment
def _find_proj_root(): # type: () -> Optional[str] """ Find the project path by going up the file tree. This will look in the current directory and upwards for the pelconf file (.yaml or .py) """ proj_files = frozenset(('pelconf.py', 'pelconf.yaml')) curr = os.getcwd() while curr.startswith('/') and len(curr) > 1: if proj_files & frozenset(os.listdir(curr)): return curr else: curr = os.path.dirname(curr) return None
Find the project path by going up the file tree. This will look in the current directory and upwards for the pelconf file (.yaml or .py)
entailment
def verify(verified_entity, verification_key):
    """
    This method is expected to raise errors on failure
    :param verified_entity: the entity being verified
    :param verification_key: the verification key
    :return:
    """
    verification = get_object_or_none(Verification, verified_entity=verified_entity)
    if verification is None:
        raise ServerError(VerificationHandler.STATUS_VERIFICATION_NOT_FOUND)
    if not verification.verify(verification_key):
        raise ServerError(VerificationHandler.STATUS_INVALID_VERIFICATION_KEY)
    verification.verified = True
    verification.save()
This method is expected to raise errors on failure :param verified_entity: the entity being verified :param verification_key: the verification key :return:
entailment
def _xml_element_value(el: Element, int_tags: list): """ Gets XML Element value. :param el: Element :param int_tags: List of tags that should be treated as ints :return: value of the element (int/str) """ # None if el.text is None: return None # int try: if el.tag in int_tags: return int(el.text) except: pass # default to str if not empty s = str(el.text).strip() return s if s else None
Gets XML Element value. :param el: Element :param int_tags: List of tags that should be treated as ints :return: value of the element (int/str)
entailment
def _xml_tag_filter(s: str, strip_namespaces: bool) -> str:
    """
    Returns tag name and optionally strips namespaces.
    :param s: Tag name, possibly namespace qualified
    :param strip_namespaces: Strip namespace prefix
    :return: str
    """
    if strip_namespaces:
        ns_end = s.find('}')
        if ns_end != -1:
            s = s[ns_end+1:]
        else:
            ns_end = s.find(':')
            if ns_end != -1:
                s = s[ns_end+1:]
    return s
Returns tag name and optionally strips namespaces. :param s: Tag name, possibly namespace qualified :param strip_namespaces: Strip namespace prefix :return: str
entailment
def xml_to_dict(xml_bytes: bytes, tags: list=[], array_tags: list=[], int_tags: list=[],
                strip_namespaces: bool=True, parse_attributes: bool=True,
                value_key: str='@', attribute_prefix: str='@', document_tag: bool=False) -> dict:
    """
    Parses XML string to dict. In case of simple elements (no children, no attributes)
    value is stored as is. For complex elements value is stored in key '@', attributes
    '@xxx' and children as sub-dicts. Optionally strips namespaces.

    For example:

        <Doc version="1.2">
            <A class="x">
                <B class="x2">hello</B>
            </A>
            <A class="y">
                <B class="y2">world</B>
            </A>
            <C>value node</C>
        </Doc>

    is returned as follows:

        {'@version': '1.2',
         'A': [{'@class': 'x', 'B': {'@': 'hello', '@class': 'x2'}},
               {'@class': 'y', 'B': {'@': 'world', '@class': 'y2'}}],
         'C': 'value node'}

    Args:
        xml_bytes: XML file contents in bytes
        tags: list of tags to parse (pass empty to return all children of top-level tag)
        array_tags: list of tags that should be treated as arrays by default
        int_tags: list of tags that should be treated as ints
        strip_namespaces: if true namespaces will be stripped
        parse_attributes: Elements with attributes are stored as complex types with '@'
            identifying text value and @xxx identifying each attribute
        value_key: Key to store (complex) element value. Default is '@'
        attribute_prefix: Key prefix to store element attribute values. Default is '@'
        document_tag: Set True if Document root tag should be included as well

    Returns: dict
    """
    from xml.etree import ElementTree as ET
    root = ET.fromstring(xml_bytes)
    if tags:
        if document_tag:
            raise Exception('xml_to_dict: document_tag=True does not make sense when using selective tag list since selective tag list finds tags from the whole document, not only directly under root document tag')
        root_elements = []
        for tag in tags:
            root_elements.extend(root.iter(tag))
    else:
        root_elements = list(root)
    data = {}
    for el in root_elements:
        _xml_set_element_data_r(data, el, array_tags=array_tags, int_tags=int_tags,
                                strip_namespaces=strip_namespaces, parse_attributes=parse_attributes,
                                value_key=value_key, attribute_prefix=attribute_prefix)
    # set root attributes
    if parse_attributes:
        for a_key, a_val in root.attrib.items():
            data[attribute_prefix + _xml_tag_filter(a_key, strip_namespaces)] = a_val
    return data if not document_tag else {root.tag: data}
Parses XML string to dict. In case of simple elements (no children, no attributes) value is stored as is. For complex elements value is stored in key '@', attributes '@xxx' and children as sub-dicts. Optionally strips namespaces. For example: <Doc version="1.2"> <A class="x"> <B class="x2">hello</B> </A> <A class="y"> <B class="y2">world</B> </A> <C>value node</C> </Doc> is returned as follows: {'@version': '1.2', 'A': [{'@class': 'x', 'B': {'@': 'hello', '@class': 'x2'}}, {'@class': 'y', 'B': {'@': 'world', '@class': 'y2'}}], 'C': 'value node'} Args: xml_bytes: XML file contents in bytes tags: list of tags to parse (pass empty to return all children of top-level tag) array_tags: list of tags that should be treated as arrays by default int_tags: list of tags that should be treated as ints strip_namespaces: if true namespaces will be stripped parse_attributes: Elements with attributes are stored as complex types with '@' identifying text value and @xxx identifying each attribute value_key: Key to store (complex) element value. Default is '@' attribute_prefix: Key prefix to store element attribute values. Default is '@' document_tag: Set True if Document root tag should be included as well Returns: dict
entailment
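A usage sketch for xml_to_dict above, reusing the document from its docstring. Passing array_tags=['A'] is just a cautious way to make sure the repeated tag comes back as a list; the other arguments keep their defaults.

    xml = (b'<Doc version="1.2">'
           b'<A class="x"><B class="x2">hello</B></A>'
           b'<A class="y"><B class="y2">world</B></A>'
           b'<C>value node</C></Doc>')
    doc = xml_to_dict(xml, array_tags=['A'])
    # Expected shape (per the docstring):
    # doc['@version'] == '1.2'
    # doc['A'][0]['B']['@'] == 'hello'
    # doc['C'] == 'value node'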
def dict_to_element(doc: dict, value_key: str='@', attribute_prefix: str='@') -> Element:
    """
    Generates XML Element from dict. Generates complex elements by assuming element
    attributes are prefixed with '@', and value is stored to plain '@' in case of
    complex element. Children are sub-dicts.

    For example:

        { 'Doc': {
            '@version': '1.2',
            'A': [{'@class': 'x', 'B': {'@': 'hello', '@class': 'x2'}},
                  {'@class': 'y', 'B': {'@': 'world', '@class': 'y2'}}],
            'C': 'value node',
        } }

    is returned as follows:

        <?xml version="1.0" ?>
        <Doc version="1.2">
            <A class="x">
                <B class="x2">hello</B>
            </A>
            <A class="y">
                <B class="y2">world</B>
            </A>
            <C>value node</C>
        </Doc>

    Args:
        doc: dict. Must have a single root key.
        value_key: Key to store (complex) element value. Default is '@'
        attribute_prefix: Key prefix to store element attribute values. Default is '@'

    Returns: xml.etree.ElementTree.Element
    """
    from xml.etree import ElementTree as ET
    if len(doc) != 1:
        raise Exception('Invalid data dict for XML generation, document root must have single element')
    for tag, data in doc.items():
        el = ET.Element(tag)
        assert isinstance(el, Element)
        _xml_element_set_data_r(el, data, value_key, attribute_prefix)
        return el
Generates XML Element from dict. Generates complex elements by assuming element attributes are prefixed with '@', and value is stored to plain '@' in case of complex element. Children are sub-dicts. For example: { 'Doc': { '@version': '1.2', 'A': [{'@class': 'x', 'B': {'@': 'hello', '@class': 'x2'}}, {'@class': 'y', 'B': {'@': 'world', '@class': 'y2'}}], 'C': 'value node', } } is returned as follows: <?xml version="1.0" ?> <Doc version="1.2"> <A class="x"> <B class="x2">hello</B> </A> <A class="y"> <B class="y2">world</B> </A> <C>value node</C> </Doc> Args: doc: dict. Must have a single root key. value_key: Key to store (complex) element value. Default is '@' attribute_prefix: Key prefix to store element attribute values. Default is '@' Returns: xml.etree.ElementTree.Element
entailment
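A small sketch for dict_to_element above, serializing the result with ElementTree to inspect it; the exact byte output may vary slightly between Python versions.

    from xml.etree import ElementTree as ET

    el = dict_to_element({'Doc': {'@version': '1.2', 'C': 'value node'}})
    print(ET.tostring(el))  # e.g. b'<Doc version="1.2"><C>value node</C></Doc>'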
def local_property(): """ Property structure which maps within the :func:local() thread (c)2014, Marcel Hellkamp """ ls = local() def fget(self): try: return ls.var except AttributeError: raise RuntimeError("Request context not initialized.") def fset(self, value): ls.var = value def fdel(self): del ls.var return property(fget, fset, fdel, 'Thread-local property')
Property structure which maps within the :func:local() thread (c)2014, Marcel Hellkamp
entailment
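A minimal sketch of attaching local_property() above to a class; the Request class here is hypothetical. Each thread sees its own value, and reading before assignment raises RuntimeError.

    class Request:
        environ = local_property()

    req = Request()
    req.environ = {'PATH_INFO': '/'}   # visible only in the current thread
    print(req.environ['PATH_INFO'])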
def download(download_info):
    """Module method for downloading from S3

    This public module method takes a key and the full path to
    the destination directory, assumes that the args have been
    validated by the public caller methods, and attempts to
    download the specified key to the dest_dir.

    :param download_info: (dict) Contains the following params
        key: (str) S3 key for the file to be downloaded
        dest_dir: (str) Full path destination directory
        bucket_name: (str) Name of the bucket to download from
        credentials: (dict) containing AWS credential info (optional)
            region_name: (str) AWS S3 region
            aws_access_key_id: (str) AWS access key ID
            aws_secret_access_key: (str) AWS secret access key
    :return: (str) Downloaded file destination if the file was
        downloaded successfully
    :raises S3UtilError
    """
    log = logging.getLogger(mod_logger + '.download')

    # Ensure the passed arg is a dict
    if not isinstance(download_info, dict):
        msg = 'download_info arg should be a dict, found: {t}'.format(t=download_info.__class__.__name__)
        raise TypeError(msg)

    # Check for and obtain required args
    required_args = ['key', 'dest_dir', 'bucket_name']
    for required_arg in required_args:
        if required_arg not in download_info:
            msg = 'Required arg not provided: {r}'.format(r=required_arg)
            log.error(msg)
            raise S3UtilError(msg)

    log.debug('Processing download request: {r}'.format(r=download_info))
    key = download_info['key']
    dest_dir = download_info['dest_dir']
    bucket_name = download_info['bucket_name']
    region_name = None
    aws_access_key_id = None
    aws_secret_access_key = None

    try:
        creds = download_info['credentials']
    except KeyError:
        log.debug('No credentials found for this download request')
    else:
        try:
            region_name = creds['region_name']
            aws_access_key_id = creds['aws_access_key_id']
            aws_secret_access_key = creds['aws_secret_access_key']
        except KeyError:
            log.warn('Insufficient credentials found for download request')
            region_name = None
            aws_access_key_id = None
            aws_secret_access_key = None

    log.debug('Configuring S3 client with AWS Access key ID {k} and region {r}'.format(
        k=aws_access_key_id, r=region_name))

    # Establish an S3 client
    client = boto3.client('s3', region_name=region_name, aws_access_key_id=aws_access_key_id,
                          aws_secret_access_key=aws_secret_access_key)

    # Attempt to determine the file name from key
    filename = key.split('/')[-1]
    if filename is None:
        msg = 'Could not determine the filename from key: {k}'.format(k=key)
        log.error(msg)
        raise S3UtilError(msg)

    # Set the destination
    destination = os.path.join(dest_dir, filename)

    # Return if the destination file was already downloaded
    if os.path.isfile(destination):
        log.info('File already downloaded: {d}'.format(d=destination))
        return destination

    # Attempt the download
    log.info('Attempting to download %s from bucket %s to destination %s', key, bucket_name, destination)
    max_tries = 10
    retry_timer = 5
    count = 1
    while count <= max_tries:
        log.info('Attempting to download file {k}: try {c} of {m}'.format(k=key, c=count, m=max_tries))
        try:
            client.download_file(Bucket=bucket_name, Key=key, Filename=destination)
        except ClientError:
            if count >= max_tries:
                _, ex, trace = sys.exc_info()
                msg = 'Unable to download key {k} from S3 bucket {b}:\n{e}'.format(k=key, b=bucket_name, e=str(ex))
                log.error(msg)
                raise S3UtilError, msg, trace
            else:
                log.warn('Download failed, re-trying in {t} sec...'.format(t=retry_timer))
                count += 1
                time.sleep(retry_timer)
                continue
        else:
            log.info('Successfully downloaded {k} from S3 bucket {b} to: {d}'.format(
                k=key, b=bucket_name, d=destination))
            return destination
Module method for downloading from S3 This public module method takes a key and the full path to the destination directory, assumes that the args have been validated by the public caller methods, and attempts to download the specified key to the dest_dir. :param download_info: (dict) Contains the following params key: (str) S3 key for the file to be downloaded dest_dir: (str) Full path destination directory bucket_name: (str) Name of the bucket to download from credentials: (dict) containing AWS credential info (optional) region_name: (str) AWS S3 region aws_access_key_id: (str) AWS access key ID aws_secret_access_key: (str) AWS secret access key :return: (str) Downloaded file destination if the file was downloaded successfully :raises S3UtilError
entailment
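A hedged example of the download_info dict expected by download() above; the bucket, key and credential values are placeholders, and the credentials block can be omitted entirely.

    download_info = {
        'key': 'releases/myapp-1.0.0.tar.gz',
        'dest_dir': '/tmp/downloads',
        'bucket_name': 'my-example-bucket',
        'credentials': {                              # optional
            'region_name': 'us-east-1',
            'aws_access_key_id': 'AKIA...',           # placeholder
            'aws_secret_access_key': 'secret...',     # placeholder
        },
    }
    local_path = download(download_info)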
def find_bucket_keys(bucket_name, regex, region_name=None, aws_access_key_id=None, aws_secret_access_key=None):
    """Finds a list of S3 keys matching the passed regex

    Given a regular expression, this method searches the S3 bucket
    for matching keys, and returns an array of strings for matched
    keys, an empty array if none are found.

    :param regex: (str) Regular expression to use in the key search
    :param bucket_name: (str) String S3 bucket name
    :param region_name: (str) AWS region for the S3 bucket (optional)
    :param aws_access_key_id: (str) AWS Access Key ID (optional)
    :param aws_secret_access_key: (str) AWS Secret Access Key (optional)
    :return: Array of strings containing matched S3 keys
    """
    log = logging.getLogger(mod_logger + '.find_bucket_keys')
    matched_keys = []
    if not isinstance(regex, basestring):
        log.error('regex argument is not a string, found: {t}'.format(t=regex.__class__.__name__))
        return None
    if not isinstance(bucket_name, basestring):
        log.error('bucket_name argument is not a string, found: {t}'.format(t=bucket_name.__class__.__name__))
        return None

    # Set up S3 resources
    s3resource = boto3.resource('s3', region_name=region_name, aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key)
    bucket = s3resource.Bucket(bucket_name)
    log.info('Looking up S3 keys based on regex: {r}'.format(r=regex))
    for item in bucket.objects.all():
        log.debug('Checking if regex matches key: {k}'.format(k=item.key))
        match = re.search(regex, item.key)
        if match:
            matched_keys.append(item.key)
    log.info('Found matching keys: {k}'.format(k=matched_keys))
    return matched_keys
Finds a list of S3 keys matching the passed regex Given a regular expression, this method searches the S3 bucket for matching keys, and returns an array of strings for matched keys, an empty array if none are found. :param regex: (str) Regular expression to use in the key search :param bucket_name: (str) String S3 bucket name :param region_name: (str) AWS region for the S3 bucket (optional) :param aws_access_key_id: (str) AWS Access Key ID (optional) :param aws_secret_access_key: (str) AWS Secret Access Key (optional) :return: Array of strings containing matched S3 keys
entailment
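A short sketch for find_bucket_keys above; the bucket name and pattern are placeholders, and credentials fall back to the default boto3 chain when omitted.

    keys = find_bucket_keys('my-example-bucket', r'backups/.*\.tar\.gz')
    for key in keys:
        print(key)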
def main(): """Sample usage for this python module This main method simply illustrates sample usage for this python module. :return: None """ log = logging.getLogger(mod_logger + '.main') log.debug('This is DEBUG!') log.info('This is INFO!') log.warning('This is WARNING!') log.error('This is ERROR!') log.info('Running s3util.main...') my_bucket = 'cons3rt-deploying-cons3rt' my_regex = 'sourcebuilder.*apache-maven-.*3.3.3.*' try: s3util = S3Util(my_bucket) except S3UtilError as e: log.error('There was a problem creating S3Util:\n%s', e) else: log.info('Created S3Util successfully') key = s3util.find_key(my_regex) test = None if key is not None: test = s3util.download_file(key, '/Users/yennaco/Downloads') if test is not None: upload = s3util.upload_file(test, 'media-files-offline-assets/test') log.info('Upload result: %s', upload) log.info('End of main!')
Sample usage for this python module This main method simply illustrates sample usage for this python module. :return: None
entailment
def validate_bucket(self): """Verify the specified bucket exists This method validates that the bucket name passed in the S3Util constructor actually exists. :return: None """ log = logging.getLogger(self.cls_logger + '.validate_bucket') log.info('Attempting to get bucket: {b}'.format(b=self.bucket_name)) max_tries = 10 count = 1 while count <= max_tries: log.info('Attempting to connect to S3 bucket %s, try %s of %s', self.bucket_name, count, max_tries) try: self.s3client.head_bucket(Bucket=self.bucket_name) except ClientError as e: _, ex, trace = sys.exc_info() error_code = int(e.response['Error']['Code']) log.debug( 'Connecting to bucket %s produced response code: %s', self.bucket_name, error_code) if error_code == 404: msg = 'Error 404 response indicates that bucket {b} does not ' \ 'exist:\n{e}'.format(b=self.bucket_name, e=str(ex)) log.error(msg) raise S3UtilError, msg, trace elif error_code == 500 or error_code == 503: if count >= max_tries: msg = 'S3 bucket is not accessible at this time: {b}\n{e}'.format( b=self.bucket_name, e=str(ex)) log.error(msg) raise S3UtilError, msg, trace else: log.warn('AWS returned error code 500 or 503, re-trying in 2 sec...') time.sleep(5) count += 1 continue else: msg = 'Connecting to S3 bucket {b} returned code: {c}\n{e}'.\ format(b=self.bucket_name, c=error_code, e=str(ex)) log.error(msg) raise S3UtilError, msg, trace else: log.info('Found bucket: %s', self.bucket_name) return
Verify the specified bucket exists This method validates that the bucket name passed in the S3Util constructor actually exists. :return: None
entailment