INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Execute a command on *Vim*. Warning: do not use the ``redir`` command if ``capture`` is ``True``; it is already enabled for internal use.
def command(self, command, capture=True):
    """
    Execute command on *Vim*.

    .. warning:: Do not use ``redir`` command if ``capture`` is ``True``.
        It's already enabled for internal use.

    If ``capture`` argument is set ``False``,
    the command execution becomes slightly faster.

    Example:
        >>> import headlessvim
        >>> with headlessvim.open() as vim:
        ...     vim.command('echo 0')
        ...
        '0'
        >>> with headlessvim.open() as vim:
        ...     vim.command('let g:spam = "ham"', False)
        ...     vim.echo('g:spam')
        ...
        'ham'

    :param string command: a command to execute
    :param boolean capture: ``True`` if command's output needs to be
        captured, else ``False``
    :return: the output of the given command
    :rtype: string
    """
    if capture:
        # Recursive call with capture=False: start redirecting Vim output
        # into the shared temp file *before* running the real command.
        self.command('redir! >> {0}'.format(self._tempfile.name), False)
    self.set_mode('command')
    self.send_keys('{0}\n'.format(command))
    if capture:
        # Stop redirection, then read back what Vim wrote.
        self.command('redir END', False)
        # NOTE(review): assumes _tempfile's read position sits at the start
        # of the freshly captured output — confirm how _tempfile is managed.
        return self._tempfile.read().strip('\n')
Set * Vim * mode to mode. Supported modes:
def set_mode(self, mode):
    """
    Set *Vim* mode to ``mode``.
    Supported modes:

    * ``normal``
    * ``insert``
    * ``command``
    * ``visual``
    * ``visual-block``

    This method behave as setter-only property.

    Example:
        >>> import headlessvim
        >>> with headlessvim.open() as vim:
        ...     vim.set_mode('insert')
        ...     vim.mode = 'normal' # also accessible as property
        ...

    :param string mode: *Vim* mode to set
    :raises ValueError: if ``mode`` is not supported
    """
    # Double ESC first: drop back to normal mode from wherever we are.
    keys = '\033\033'
    if mode == 'normal':
        pass
    elif mode == 'insert':
        keys += 'i'
    elif mode == 'command':
        keys += ':'
    elif mode == 'visual':
        keys += 'v'
    elif mode == 'visual-block':
        # Fixed: blockwise visual mode is started with CTRL-V (0x16);
        # plain 'V' starts *linewise* visual mode.
        keys += '\026'
    else:
        raise ValueError('mode {0} is not supported'.format(mode))
    self.send_keys(keys)
: param size: ( lines columns ) tuple of a screen connected to * Vim *.: type size: ( int int )
def screen_size(self, size):
    """
    :param size: (lines, columns) tuple of a screen connected to *Vim*.
    :type size: (int, int)
    """
    # Property setter: reading self.screen_size here invokes the getter,
    # so the terminal is resized only when the size actually changes.
    if self.screen_size != size:
        # NOTE(review): _swap presumably reorders (lines, columns) into the
        # (columns, lines) order pyte's screen.resize expects — confirm.
        self._screen.resize(*self._swap(size))
: return: runtime path of * Vim *: rtype: runtimepath. RuntimePath
def runtimepath(self):
    """
    :return: runtime path of *Vim*
    :rtype: runtimepath.RuntimePath
    """
    # Lazily build the RuntimePath wrapper on first access, then memoize.
    if self._runtimepath is not None:
        return self._runtimepath
    self._runtimepath = runtimepath.RuntimePath(self)
    return self._runtimepath
If the file - object is not seekable return ArchiveTemp of the fileobject otherwise return the file - object itself
def make_seekable(fileobj):
    """
    If the file-object is not seekable, return ArchiveTemp of the
    fileobject, otherwise return the file-object itself
    """
    # Python 2 `file` objects are not io.IOBase: rewrap them around the
    # same descriptor (closefd=False keeps the original fd alive).
    if sys.version_info < (3, 0) and isinstance(fileobj, file):
        original_name = fileobj.name
        fileobj = io.FileIO(fileobj.fileno(), closefd=False)
        fileobj.name = original_name
    assert isinstance(fileobj, io.IOBase), \
        "fileobj must be an instance of io.IOBase or a file, got %s" \
        % type(fileobj)
    if fileobj.seekable():
        return fileobj
    return ArchiveTemp(fileobj)
Setup before_request after_request handlers for tracing.
def init_app(self, app):
    """Setup before_request, after_request handlers for tracing.

    Sets the TRACY_REQUIRE_CLIENT config default, registers this
    extension under ``app.extensions['restpoints']`` and installs the
    request hooks.
    """
    app.config.setdefault("TRACY_REQUIRE_CLIENT", False)
    try:
        registry = app.extensions
    except AttributeError:
        registry = app.extensions = {}
    registry['restpoints'] = self
    app.before_request(self._before)
    app.after_request(self._after)
Records the starting time of this reqeust.
def _before(self): """Records the starting time of this reqeust. """ # Don't trace excluded routes. if request.path in self.excluded_routes: request._tracy_exclude = True return request._tracy_start_time = monotonic() client = request.headers.get(trace_header_client, None) require_client = current_app.config.get("TRACY_REQUIRE_CLIENT", False) if client is None and require_client: abort(400, "Missing %s header" % trace_header_client) request._tracy_client = client request._tracy_id = request.headers.get(trace_header_id, new_id())
Calculates the request duration and adds a transaction ID to the header.
def _after(self, response): """Calculates the request duration, and adds a transaction ID to the header. """ # Ignore excluded routes. if getattr(request, '_tracy_exclude', False): return response duration = None if getattr(request, '_tracy_start_time', None): duration = monotonic() - request._tracy_start_time # Add Trace_ID header. trace_id = None if getattr(request, '_tracy_id', None): trace_id = request._tracy_id response.headers[trace_header_id] = trace_id # Get the invoking client. trace_client = None if getattr(request, '_tracy_client', None): trace_client = request._tracy_client # Extra log kwargs. d = {'status_code': response.status_code, 'url': request.base_url, 'client_ip': request.remote_addr, 'trace_name': trace_client, 'trace_id': trace_id, 'trace_duration': duration} logger.info(None, extra=d) return response
Close the http/ https connect.
def close(self):
    """Close the http/https connect."""
    try:
        self.response.close()
        self.logger.debug("close connect succeed.")
    except Exception as exc:
        self.unknown("close connect error: %s" % exc)
IMPORTANT: expects `path`'s parent to already be deref()'erenced.
def _stat(self, path):
    '''IMPORTANT: expects `path`'s parent to already be deref()'erenced.

    Return the stat for `path`, consulting the overlay first and
    following overlay symlinks; falls back to the saved original os.stat.
    '''
    if path not in self.entries:
        # Not shadowed by the overlay: use the original os.stat (first 10
        # fields only) and tag the result as coming from the real FS.
        return OverlayStat(*self.originals['os:stat'](path)[:10], st_overlay=0)
    st = self.entries[path].stat
    if stat.S_ISLNK(st.st_mode):
        # Overlay entry is a symlink; stat() semantics follow the link.
        return self._stat(self.deref(path))
    return st
IMPORTANT: expects path s parent to already be deref () erenced.
def _lstat(self, path):
    '''IMPORTANT: expects `path`'s parent to already be deref()'erenced.

    Like `_stat` but does not follow symlinks (lstat semantics).
    '''
    if path not in self.entries:
        # Not shadowed by the overlay: delegate to the original os.lstat.
        return OverlayStat(*self.originals['os:lstat'](path)[:10], st_overlay=0)
    return self.entries[path].stat
IMPORTANT: expects path to already be deref () erenced.
def _exists(self, path): '''IMPORTANT: expects `path` to already be deref()'erenced.''' try: return bool(self._stat(path)) except os.error: return False
IMPORTANT: expects path to already be deref () erenced.
def _lexists(self, path): '''IMPORTANT: expects `path` to already be deref()'erenced.''' try: return bool(self._lstat(path)) except os.error: return False
overlays os. path. exists ()
def fso_exists(self, path):
    """Overlay replacement for os.path.exists()."""
    try:
        resolved = self.deref(path)
        return self._exists(resolved)
    except os.error:
        return False
overlays os. path. lexists ()
def fso_lexists(self, path):
    """Overlay replacement for os.path.lexists()."""
    try:
        resolved = self.deref(path, to_parent=True)
        return self._lexists(resolved)
    except os.error:
        return False
overlays os. listdir ()
def fso_listdir(self, path):
    'overlays os.listdir()'
    path = self.deref(path)
    if not stat.S_ISDIR(self._stat(path).st_mode):
        raise OSError(20, 'Not a directory', path)
    try:
        ret = self.originals['os:listdir'](path)
    except Exception:
        # assuming that `path` was created within this FSO...
        ret = []
    # Merge overlay entries that are *direct* children of `path`.
    for entry in self.entries.values():
        if not entry.path.startswith(path + '/'):
            continue
        subpath = entry.path[len(path) + 1:]
        if '/' in subpath:
            # Deeper descendant, not a direct child.
            continue
        if entry.mode is None:
            # mode None marks a deletion in the overlay: hide the name.
            if subpath in ret:
                ret.remove(subpath)
        else:
            if subpath not in ret:
                ret.append(subpath)
    return ret
overlays os. mkdir ()
def fso_mkdir(self, path, mode=None):
    """Overlay replacement for os.mkdir() (``mode`` is accepted but unused)."""
    target = self.deref(path, to_parent=True)
    if self._lexists(target):
        raise OSError(17, 'File exists', target)
    self._addentry(OverlayEntry(self, target, stat.S_IFDIR))
overlays os. makedirs ()
def fso_makedirs(self, path, mode=None):
    'overlays os.makedirs()'
    path = self.abs(path)
    cur = '/'
    segments = path.split('/')
    # Walk the path one component at a time, creating what is missing.
    for idx, seg in enumerate(segments):
        cur = os.path.join(cur, seg)
        try:
            st = self.fso_stat(cur)
        except OSError:
            st = None
        if st is None:
            # Missing component: create it. NOTE(review): `mode` is
            # accepted but never forwarded — confirm this is intentional.
            self.fso_mkdir(cur)
            continue
        if idx + 1 == len(segments):
            # The full path already exists — mirror os.makedirs' EEXIST.
            raise OSError(17, 'File exists', path)
        if not stat.S_ISDIR(st.st_mode):
            raise OSError(20, 'Not a directory', path)
overlays os. rmdir ()
def fso_rmdir(self, path):
    """Overlay replacement for os.rmdir()."""
    info = self.fso_lstat(path)
    if not stat.S_ISDIR(info.st_mode):
        raise OSError(20, 'Not a directory', path)
    if self.fso_listdir(path):
        raise OSError(39, 'Directory not empty', path)
    # A mode of None records a deletion in the overlay.
    self._addentry(OverlayEntry(self, path, None))
overlays os. readlink ()
def fso_readlink(self, path):
    """Overlay replacement for os.readlink()."""
    target = self.deref(path, to_parent=True)
    st = self.fso_lstat(target)
    if not stat.S_ISLNK(st.st_mode):
        raise OSError(22, 'Invalid argument', target)
    if st.st_overlay:
        # Link created inside the overlay: its destination is the content.
        return self.entries[target].content
    return self.originals['os:readlink'](target)
overlays os. symlink ()
def fso_symlink(self, source, link_name):
    """Overlay replacement for os.symlink()."""
    target = self.deref(link_name, to_parent=True)
    if self._exists(target):
        raise OSError(17, 'File exists')
    self._addentry(OverlayEntry(self, target, stat.S_IFLNK, source))
overlays os. unlink ()
def fso_unlink(self, path):
    """Overlay replacement for os.unlink()."""
    target = self.deref(path, to_parent=True)
    if not self._lexists(target):
        raise OSError(2, 'No such file or directory', target)
    # A mode of None records a deletion in the overlay.
    self._addentry(OverlayEntry(self, target, None))
overlays os. path. islink ()
def fso_islink(self, path):
    """Overlay replacement for os.path.islink()."""
    try:
        mode = self.fso_lstat(path).st_mode
    except OSError:
        return False
    return stat.S_ISLNK(mode)
overlays shutil. rmtree ()
def fso_rmtree(self, path, ignore_errors=False, onerror=None):
    """Overlay replacement for shutil.rmtree().

    :param path: directory to remove recursively
    :param ignore_errors: when True all errors are silently ignored
    :param onerror: callback ``(func, path, exc_info)`` invoked on failure;
        defaults to re-raising the active exception
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        if self.fso_islink(path):
            # symlinks to directories are forbidden, see shutil bug #1669
            raise OSError('Cannot call rmtree on a symbolic link')
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = self.fso_listdir(path)
    # Fixed: was the Python-2-only `except os.error, err:` syntax, a
    # SyntaxError on Python 3 (the rest of this function already used
    # the `as` form).
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = self.fso_lstat(fullname).st_mode
        except os.error:
            mode = 0
        if stat.S_ISDIR(mode):
            self.fso_rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                self.fso_remove(fullname)
            except OSError:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        self.fso_rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
MWAPIWrapper 控制API请求异常的装饰器 根据requests库定义的异常来控制请求返回的意外情况
def MWAPIWrapper(func):
    """Decorator that guards MediaWiki API calls against request failures.

    Catches the exception types defined by the ``requests`` library and
    converts them into a uniform ``{'success': False, 'errtitle': ...,
    'errmsg': ...}`` return value; on success the wrapped function's
    result is returned unchanged. (Docstring translated from Chinese;
    the runtime error messages are intentionally left in Chinese.)
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Assumes the decorated callable is a method: args[0] is `self`.
        self = args[0]
        try:
            result = func(*args, **kwargs)
            return result
        except ConnectionError:
            err_title = '连接错误'
            err_message = '[{name}] 连接错误,网络状况异常'.format(name=func.__name__, host=self.host)
        except HTTPError as e:
            # NOTE(review): `e.message` is Python 2 only — confirm the
            # target interpreter before relying on this branch.
            err_title = 'HTTP响应错误'
            err_message = '[{name}] 目标服务器"{host}" HTTP响应错误({detail})'.format(name=func.__name__, host=self.host, detail=e.message)
        except Timeout:
            err_title = '请求超时'
            err_message = '[{name}] 目标服务器"{host}" 请求超时'.format(name=func.__name__, host=self.host)
        except TooManyRedirects:
            err_title = '过多重定向'
            err_message = '[{name}] 目标服务器"{host}" 过多重定向'.format(name=func.__name__, host=self.host)
        except ValueError as e:
            # A ValueError mentioning JSON is treated as a bad API payload.
            if e.message.find('JSON') >= 0:
                err_title = 'API JSON返回值异常'
                err_message = '[{name}] 目标服务器"{host}" API JSON返回值异常'.format(name=func.__name__, host=self.host)
            else:
                err_title = '值错误'
                err_message = '[{name}] 存在ValueError:{msg}'.format(name=func.__name__, msg=e.message)
            self.log.error(e, exc_info=True)
        except KeyError as e:
            err_title = '键错误'
            err_message = '[{name}] 存在KeyError,错误键为{key}'.format(name=func.__name__, key=e.message)
            self.log.error(e, exc_info=True)
        except MWAPIException as e:
            err_title = 'Mediawiki API 异常'
            err_message = e.message
        self.log.error('%s:%s', err_title, err_message)
        return {'success': False, 'errtitle': err_title, 'errmsg': err_message}
    return wrapper
Insert spaces between words until it is wide enough for width.
def expand_words(self, line, width=60):
    """ Insert spaces between words until it is wide enough for `width`.

    Visible width is measured with strip_codes(), so embedded escape
    codes do not count toward the target width.
    """
    if not line.strip():
        return line
    # Word index, which word to insert on (cycles between 1->len(words))
    wordi = 1
    while len(strip_codes(line)) < width:
        wordendi = self.find_word_end(line, wordi)
        if wordendi < 0:
            # Reached the end?, try starting at the front again.
            wordi = 1
            wordendi = self.find_word_end(line, wordi)
        if wordendi < 0:
            # There are no spaces to expand, just prepend one.
            line = ''.join((' ', line))
        else:
            # Insert one extra space at this word boundary.
            line = ' '.join((line[:wordendi], line[wordendi:]))
            wordi += 1
    # Don't push a single word all the way to the right.
    if ' ' not in strip_codes(line).strip():
        return line.replace(' ', '')
    return line
This is a helper method for self. expand_words (). Finds the index of word endings ( default is first word ). The last word doesn t count. If there are no words or there are no spaces in the word it returns - 1.
def find_word_end(text, count=1):
    """ This is a helper method for self.expand_words().
        Finds the index of word endings (default is first word).
        The last word doesn't count.
        If there are no words, or there are no spaces in the word, it returns
        -1.

        This method ignores escape codes.
        Example:
            s = 'this is a test'
            i = find_word_end(s, count=1)
            print('-'.join((s[:i], s[i:])))
            # 'this- is a test'
            i = find_word_end(s, count=2)
            print('-'.join((s[:i], s[i:])))
            # 'this is- a test'
    """
    if not text:
        return -1
    elif ' ' not in text:
        return 0
    elif not text.strip():
        return -1
    count = count or 1
    found = 0
    foundindex = -1
    inword = False
    # get_indices() maps string index -> char or whole escape sequence.
    indices = get_indices(str(text))
    sortedindices = sorted(indices)
    for i in sortedindices:
        c = indices[i]
        if inword and c.isspace():
            # Found space.
            inword = False
            foundindex = i
            found += 1
            # Was there an escape code before this space?
            testindex = i
            while testindex > 0:
                testindex -= 1
                s = indices.get(testindex, None)
                if s is None:
                    # Must be in the middle of an escape code.
                    continue
                if len(s) == 1:
                    # Test index was a char.
                    foundindex = testindex + 1
                    break
            if found == count:
                return foundindex
        elif not c.isspace():
            inword = True
    # We ended in a word/escape-code, or there were no words.
    lastindex = sortedindices[-1]
    if len(indices[lastindex]) > 1:
        # Last word included an escape code. Rewind a bit.
        while lastindex > 0:
            lastindex -= 1
            s = indices.get(lastindex, None)
            if s is None:
                # Must be in the middle of an escape code.
                continue
            if len(s) == 1:
                # Found last char.
                return lastindex + 1
    return -1 if inword else foundindex
Format a long string into a block of newline seperated text. Arguments: See iter_format_block ().
def format(
        self, text=None,
        width=60, chars=False, fill=False, newlines=False,
        prepend=None, append=None, strip_first=False, strip_last=False,
        lstrip=False):
    """ Format a long string into a block of newline seperated text.
        Arguments:
            See iter_format_block().
    """
    # Convenience wrapper: join the iterator output into one string.
    source = (self.text if text is None else text) or ''
    lines = self.iter_format_block(
        source,
        width=width,
        chars=chars,
        fill=fill,
        newlines=newlines,
        prepend=prepend,
        append=append,
        strip_first=strip_first,
        strip_last=strip_last,
        lstrip=lstrip,
    )
    return '\n'.join(lines)
Prepend or append text to lines. Yields each line.
def iter_add_text(self, lines, prepend=None, append=None):
    """ Prepend and/or append text to each line, yielding the results. """
    if (prepend is None) and (append is None):
        yield from lines
        return
    # Treat falsy prepend/append the same as absent ones.
    head = prepend if prepend else ''
    tail = append if append else ''
    for line in lines:
        yield '{0}{1}{2}'.format(head, line, tail)
Iterator that turns a long string into lines no greater than width in length. It can wrap on spaces or characters. It only does basic blocks. For prepending see iter_format_block ().
def iter_block(
        self, text=None,
        width=60, chars=False, newlines=False, lstrip=False):
    """ Iterator that turns a long string into lines no greater than 'width'
        in length.
        It can wrap on spaces or characters. It only does basic blocks.
        For prepending see `iter_format_block()`.

        Arguments:
            text     : String to format.
            width    : Maximum width for each line.
                       Default: 60
            chars    : Wrap on characters if true, otherwise on spaces.
                       Default: False
            newlines : Preserve newlines when True.
                       Default: False
            lstrip   : Whether to remove leading spaces from each line.
                       Default: False
    """
    text = (self.text if text is None else text) or ''
    if width < 1:
        width = 1
    # str.lstrip is applied per line when requested; str() is a no-op.
    fmtline = str.lstrip if lstrip else str
    if chars and (not newlines):
        # Simple block by chars, newlines are treated as a space.
        yield from self.iter_char_block(
            text,
            width=width,
            fmtfunc=fmtline
        )
    elif newlines:
        # Preserve newlines: recurse per original line with newlines off.
        for line in text.split('\n'):
            yield from self.iter_block(
                line,
                width=width,
                chars=chars,
                lstrip=lstrip,
                newlines=False,
            )
    else:
        # Wrap on spaces (ignores newlines)..
        yield from self.iter_space_block(
            text,
            width=width,
            fmtfunc=fmtline,
        )
Format block by splitting on individual characters.
def iter_char_block(self, text=None, width=60, fmtfunc=str):
    """ Format block by splitting on individual characters.

    Escape codes are kept intact and do not count toward `width`.
    """
    if width < 1:
        width = 1
    text = (self.text if text is None else text) or ''
    # Newlines are flattened to spaces for character wrapping.
    text = ' '.join(text.split('\n'))
    escapecodes = get_codes(text)
    if not escapecodes:
        # No escape codes, use simple method.
        yield from (
            fmtfunc(text[i:i + width])
            for i in range(0, len(text), width)
        )
    else:
        # Ignore escape codes when counting.
        blockwidth = 0
        block = []
        for i, s in enumerate(get_indices_list(text)):
            block.append(s)
            if len(s) == 1:
                # Normal char.
                blockwidth += 1
            if blockwidth == width:
                yield ''.join(block)
                block = []
                blockwidth = 0
        if block:
            # Emit the trailing partial line.
            yield ''.join(block)
Iterate over lines in a formatted block of text. This iterator allows you to prepend to each line. For basic blocks see iter_block ().
def iter_format_block(
        self, text=None,
        width=60, chars=False, fill=False, newlines=False,
        append=None, prepend=None, strip_first=False, strip_last=False,
        lstrip=False):
    """ Iterate over lines in a formatted block of text.
        This iterator allows you to prepend to each line.
        For basic blocks see iter_block().

        Arguments:
            text        : String to format.
            width       : Maximum width for each line. The prepend string
                          is not included in this calculation.
                          Default: 60
            chars       : Whether to wrap on characters instead of spaces.
                          Default: False
            fill        : Insert spaces between words so that each line is
                          the same width. This overrides `chars`.
                          Default: False
            newlines    : Whether to preserve newlines in the original
                          string.
                          Default: False
            append      : String to append after each line.
            prepend     : String to prepend before each line.
            strip_first : Whether to omit the prepend string for the first
                          line.  Default: False
            strip_last  : Whether to omit the append string for the last
                          line (like strip_first does for prepend).
                          Default: False
            lstrip      : Whether to remove leading spaces from each line.
                          This doesn't include any spaces in `prepend`.
                          Default: False
    """
    if fill:
        chars = False
    iterlines = self.iter_block(
        (self.text if text is None else text) or '',
        width=width,
        chars=chars,
        newlines=newlines,
        lstrip=lstrip,
    )
    if not (prepend or append):
        # Shortcut some of the logic below when not prepending/appending.
        if fill:
            yield from (
                self.expand_words(l, width=width) for l in iterlines
            )
        else:
            yield from iterlines
    else:
        # Prepend, append, or both prepend/append to each line.
        if prepend:
            prependlen = len(prepend)
        else:
            # No prepend, stripping not necessary and shouldn't be tried.
            strip_first = False
            prependlen = 0
        if append:
            # Unfortunately appending mean exhausting the generator.
            # I don't know where the last line is if I don't.
            lines = list(iterlines)
            lasti = len(lines) - 1
            iterlines = (l for l in lines)
            appendlen = len(append)
        else:
            # No append, stripping not necessary and shouldn't be tried.
            strip_last = False
            appendlen = 0
            lasti = -1
        for i, l in enumerate(self.iter_add_text(
                iterlines,
                prepend=prepend,
                append=append)):
            if strip_first and (i == 0):
                # Strip the prepend that iter_add_text() added.
                l = l[prependlen:]
            elif strip_last and (i == lasti):
                # Strip the append that iter_add_text() added.
                l = l[:-appendlen]
            if fill:
                yield self.expand_words(l, width=width)
            else:
                yield l
Format block by wrapping on spaces.
def iter_space_block(self, text=None, width=60, fmtfunc=str):
    """ Format block by wrapping on spaces.

    :param text: string to wrap; falls back to self.text when None
    :param width: maximum visible width per line (escape codes excluded)
    :param fmtfunc: callable applied to each finished line
    """
    if width < 1:
        width = 1
    curline = ''
    text = (self.text if text is None else text) or ''
    for word in text.split():
        possibleline = ' '.join((curline, word)) if curline else word
        # Ignore escape codes when measuring visible length.
        codelen = sum(len(s) for s in get_codes(possibleline))
        reallen = len(possibleline) - codelen
        if reallen > width:
            # This word would exceed the limit, start a new line with it.
            # Fixed: previously a leading over-long word yielded a spurious
            # empty first line because the (empty) current line was emitted
            # unconditionally.
            if curline:
                yield fmtfunc(curline)
            curline = word
        else:
            curline = possibleline
    # yield the last line.
    if curline:
        yield fmtfunc(curline)
Remove spaces in between words until it is small enough for width. This will always leave at least one space between words so it may not be able to get below width characters.
def squeeze_words(line, width=60):
    """ Remove spaces in between words until it is small enough for
        `width`.
        This will always leave at least one space between words,
        so it may not be able to get below `width` characters.
    """
    # Each pass removes one space from the *last* double-space run,
    # leaving a single separator. Fixed: the previous version partitioned
    # on a single space and re-joined with a single space, which never
    # shortened the line and therefore looped forever whenever
    # len(line) > width.
    while ('  ' in line) and (len(line) > width):
        head, _, tail = line.rpartition('  ')
        line = ' '.join((head, tail))
    return line
Check IP trough the httpBL API
def check_ip(self, ip):
    """
    Check IP through the httpBL API

    :param ip: ipv4 ip address
    :return: httpBL results or None if any error is occurred
    """
    self._last_result = None
    if is_valid_ipv4(ip):
        key = None
        if self._use_cache:
            # Try the cache before hitting the API.
            key = self._make_cache_key(ip)
            self._last_result = self._cache.get(key, version=self._cache_version)
        if self._last_result is None:
            # request httpBL API
            error, age, threat, type = self._request_httpbl(ip)
            # NOTE(review): only error codes 0 and 127 are treated as
            # usable responses — confirm against the httpBL protocol.
            if error == 127 or error == 0:
                self._last_result = {
                    'error': error,
                    'age': age,
                    'threat': threat,
                    'type': type
                }
                if self._use_cache:
                    self._cache.set(key, self._last_result,
                                    timeout=self._api_timeout,
                                    version=self._cache_version)
    if self._last_result is not None and settings.CACHED_HTTPBL_USE_LOGGING:
        logger.info(
            'httpBL check ip: {0}; '
            'httpBL result: error: {1}, age: {2}, threat: {3}, type: {4}'.format(
                ip,
                self._last_result['error'],
                self._last_result['age'],
                self._last_result['threat'],
                self._last_result['type']
            )
        )
    return self._last_result
Check if IP is a threat
def is_threat(self, result=None, harmless_age=None, threat_score=None,
              threat_type=None):
    """
    Check if IP is a threat

    :param result: httpBL results; if None, then results from last
        check_ip() used (optional)
    :param harmless_age: harmless age for check if httpBL age is older
        (optional)
    :param threat_score: threat score for check if httpBL threat is lower
        (optional)
    :param threat_type: threat type, if not equal httpBL score type, then
        return False (optional)
    :return: True or False
    """
    if harmless_age is None:
        harmless_age = settings.CACHED_HTTPBL_HARMLESS_AGE
    if threat_score is None:
        threat_score = settings.CACHED_HTTPBL_THREAT_SCORE
    if threat_type is None:
        threat_type = -1
    if result is None:
        result = self._last_result
    if result is None:
        return False
    threat = result['age'] < harmless_age and result['threat'] > threat_score
    if threat_type > -1:
        # When a type filter is given, it overrides the age/score verdict.
        threat = bool(result['type'] & threat_type)
    return threat
Check if IP is suspicious
def is_suspicious(self, result=None):
    """
    Check if IP is suspicious

    :param result: httpBL results; if None, then results from last
        check_ip() used (optional)
    :return: True or False
    """
    if result is None:
        result = self._last_result
    if result is None:
        return False
    return result['type'] > 0
Invalidate httpBL cache for IP address
def invalidate_ip(self, ip):
    """
    Invalidate httpBL cache for IP address

    :param ip: ipv4 IP address
    """
    if not self._use_cache:
        return
    cache_key = self._make_cache_key(ip)
    self._cache.delete(cache_key, version=self._cache_version)
Invalidate httpBL cache
def invalidate_cache(self):
    """
    Invalidate httpBL cache

    Bumps the local cache version and the shared version counter so all
    previously cached results become unreachable.
    """
    if not self._use_cache:
        return
    self._cache_version += 1
    self._cache.increment('cached_httpbl_{0}_version'.format(self._api_key))
Runs the consumer.
def run(self):
    """Run the consumer loop until ``self.running`` is cleared."""
    self.log.debug('consumer is running...')
    self.running = True
    while True:
        if not self.running:
            break
        self.upload()
    self.log.debug('consumer exited.')
Upload the next batch of items return whether successful.
def upload(self):
    """Upload the next batch of items, return whether successful."""
    batch = self.next()
    if not batch:
        return False
    try:
        self.request(batch)
        return True
    except Exception as exc:
        self.log.error('error uploading: %s', exc)
        if self.on_error:
            self.on_error(exc, batch)
        return False
    finally:
        # Mark every pulled item as processed, success or failure.
        for _ in batch:
            self.queue.task_done()
Return the next batch of items to upload.
def next(self):
    """Return the next batch of items to upload."""
    first = self.next_item()
    if first is None:
        return []
    batch = [first]
    # Keep draining without blocking until the batch is full.
    while len(batch) < self.upload_size and not self.queue.empty():
        item = self.next_item()
        if item:
            batch.append(item)
    return batch
Get a single item from the queue.
def next_item(self):
    """Get a single item from the queue, or None on timeout/error."""
    try:
        return self.queue.get(block=True, timeout=5)
    except Exception:
        return None
Attempt to upload the batch and retry before raising an error
def request(self, batch, attempt=0): """Attempt to upload the batch and retry before raising an error """ try: q = self.api.new_queue() for msg in batch: q.add(msg['event'], msg['value'], source=msg['source']) q.submit() except: if attempt > self.retries: raise self.request(batch, attempt+1)
Translate camelCase into underscore format.
def _camelcase_to_underscore(url): """ Translate camelCase into underscore format. >>> _camelcase_to_underscore('minutesBetweenSummaries') 'minutes_between_summaries' """ def upper2underscore(text): for char in text: if char.islower(): yield char else: yield '_' if char.isalpha(): yield char.lower() return ''.join(upper2underscore(url))
Creates the Trello endpoint tree.
def create_tree(endpoints):
    """ Creates the Trello endpoint tree.

    :param endpoints: iterable of (method, url, doc) triples
    :return: nested dict keyed by underscore-converted URL segments, with
        a 'METHODS' list of [method, doc] pairs at each endpoint node
    """
    tree = {}
    for method, url, doc in endpoints:
        path = [p for p in url.strip('/').split('/')]
        here = tree
        # First element (API Version).
        version = path[0]
        here.setdefault(version, {})
        here = here[version]
        # The rest of elements of the URL.
        for p in path[1:]:
            part = _camelcase_to_underscore(p)
            here.setdefault(part, {})
            here = here[part]
        # Allowed HTTP methods. Fixed: the old membership test compared
        # the method string against [method, doc] pairs and never matched,
        # so duplicate methods accumulated; compare against the stored
        # method names instead.
        methods = here.setdefault('METHODS', [])
        if method not in (m for m, _ in methods):
            methods.append([method, doc])
    return tree
Prints the complete YAML.
def main():
    """ Prints the complete YAML.

    Scrapes the Trello API reference pages, extracts every documented
    endpoint and dumps the resulting endpoint tree as YAML to stdout.
    """
    ep = requests.get(TRELLO_API_DOC).content
    root = html.fromstring(ep)
    # Internal links ending in index.html each describe a group of
    # endpoints.
    links = root.xpath('//a[contains(@class, "reference internal")]/@href')
    pages = [requests.get(TRELLO_API_DOC + u)
             for u in links if u.endswith('index.html')]
    endpoints = []
    for page in pages:
        root = html.fromstring(page.content)
        sections = root.xpath('//div[@class="section"]/h2/..')
        for sec in sections:
            ep_html = etree.tostring(sec).decode('utf-8')
            ep_text = html2text(ep_html).splitlines()
            match = EP_DESC_REGEX.match(ep_text[0])
            if not match:
                continue
            ep_method, ep_url = match.groups()
            ep_text[0] = ' '.join([ep_method, ep_url])
            # Documentation is stored gzip-compressed and base64-encoded
            # to keep the YAML output compact.
            ep_doc = b64encode(gzip.compress('\n'.join(ep_text).encode('utf-8')))
            endpoints.append((ep_method, ep_url, ep_doc))
    print(yaml.dump(create_tree(endpoints)))
Override the default exit in the parser. :param parser: the argument parser. :param _: exit code (unused because we don't exit). :param message: optional message.
def _parser_exit(parser: argparse.ArgumentParser, proc: "DirectoryListProcessor", _=0, message: Optional[str]=None) -> None: """ Override the default exit in the parser. :param parser: :param _: exit code. Unused because we don't exit :param message: Optional message """ if message: parser._print_message(message, sys.stderr) proc.successful_parse = False
Preprocess any arguments that begin with the fromfile prefix char(s). This replaces the one in argparse because it (a) doesn't process "-x y" correctly and (b) ignores bad files. :param argv: raw options list. :return: options list with file references replaced.
def decode_file_args(self, argv: List[str]) -> List[str]: """ Preprocess any arguments that begin with the fromfile prefix char(s). This replaces the one in Argparse because it a) doesn't process "-x y" correctly and b) ignores bad files :param argv: raw options list :return: options list with file references replaced """ for arg in [arg for arg in argv if arg[0] in self.fromfile_prefix_chars]: argv.remove(arg) with open(arg[1:]) as config_file: argv += shlex.split(config_file.read()) return self.decode_file_args(argv) return argv
Report an error: param ifn: Input file name: param e: Exception to report
def _proc_error(ifn: str, e: Exception) -> None: """ Report an error :param ifn: Input file name :param e: Exception to report """ type_, value_, traceback_ = sys.exc_info() traceback.print_tb(traceback_, file=sys.stderr) print(file=sys.stderr) print("***** ERROR: %s" % ifn, file=sys.stderr) print(str(e), file=sys.stderr)
Call the actual processor and intercept anything that goes wrong: param proc: Process to call: param ifn: Input file name to process. If absent typical use is stdin: param ofn: Output file name. If absent typical use is stdout: return: true means process was successful
def _call_proc(self, proc: Callable[[Optional[str], Optional[str], argparse.Namespace], bool], ifn: Optional[str], ofn: Optional[str]) -> bool: """ Call the actual processor and intercept anything that goes wrong :param proc: Process to call :param ifn: Input file name to process. If absent, typical use is stdin :param ofn: Output file name. If absent, typical use is stdout :return: true means process was successful """ rslt = False try: rslt = proc(ifn, ofn, self.opts) except Exception as e: self._proc_error(ifn, e) return True if rslt or rslt is None else False
Run the directory list processor calling a function per file.: param proc: Process to invoke. Args: input_file_name output_file_name argparse options. Return pass or fail. No return also means pass: param file_filter: Additional filter for testing file names types etc.: param file_filter_2: File filter that includes directory filename and opts ( separate for backwards compatibility ): return: tuple - ( number of files passed to proc: int number of files that passed proc )
def run(self,
        proc: Callable[[Optional[str], Optional[str], argparse.Namespace], Optional[bool]],
        file_filter: Optional[Callable[[str], bool]]=None,
        file_filter_2: Optional[Callable[[Optional[str], str, argparse.Namespace], bool]]=None) \
        -> Tuple[int, int]:
    """ Run the directory list processor calling a function per file.

    Operates in one of three modes depending on the parsed options:
    an explicit list of input files, a single (stdin) invocation, or a
    recursive walk of an input directory.

    :param proc: Process to invoke. Args: input_file_name, output_file_name,
        argparse options. Return pass or fail. No return also means pass
    :param file_filter: Additional filter for testing file names, types, etc.
    :param file_filter_2: File filter that includes directory, filename and opts
        (separate for backwards compatibility)
    :return: tuple - (number of files passed to proc, number of files that passed proc)
    """
    nfiles = 0
    nsuccess = 0

    # Mode 1: list of one or more input (and output) files
    if self.opts.infile:
        for file_idx in range(len(self.opts.infile)):
            in_f = self.opts.infile[file_idx]
            if self._check_filter(in_f, self.opts.indir, file_filter, file_filter_2):
                # file names are taken relative to indir when one is supplied
                fn = os.path.join(self.opts.indir, in_f) if self.opts.indir else in_f
                nfiles += 1
                if self._call_proc(proc, fn, self._outfile_name('', fn, outfile_idx=file_idx)):
                    nsuccess += 1
                elif self.opts.stoponerror:
                    return nfiles, nsuccess

    # Mode 2: no files and no directory -- single input from the command line
    elif not self.opts.indir:
        if self._check_filter(None, None, file_filter, file_filter_2):
            nfiles += 1
            if self._call_proc(proc, None, self._outfile_name('', '')):
                nsuccess += 1

    # Mode 3: input directory that needs to be navigated recursively
    else:
        for dirpath, _, filenames in os.walk(self.opts.indir):
            for fn in filenames:
                if self._check_filter(fn, dirpath, file_filter, file_filter_2):
                    nfiles += 1
                    if self._call_proc(proc, os.path.join(dirpath, fn), self._outfile_name(dirpath, fn)):
                        nsuccess += 1
                    elif self.opts.stoponerror:
                        return nfiles, nsuccess

    return nfiles, nsuccess
Construct the output file name from the input file. If a single output file was named and there isn t a directory return the output file.: param dirpath: Directory path to infile: param infile: Name of input file: param outfile_idx: Index into output file list ( for multiple input/ output files ): return: Full name of output file or None if output is not otherwise supplied
def _outfile_name(self, dirpath: str, infile: str, outfile_idx: int=0) -> Optional[str]:
    """ Construct the output file name from the input file.  If a single output
    file was named and there isn't a directory, return the output file.

    :param dirpath: Directory path to infile
    :param infile: Name of input file
    :param outfile_idx: Index into output file list (for multiple input/output files)
    :return: Full name of output file or None if output is not otherwise supplied
    """
    if not self.opts.outfile and not self.opts.outdir:
        # Up to the process itself to decide what do do with it
        return None

    if self.opts.outfile:
        # Output file specified - either one aggregate file or a 1 to 1 list
        outfile_element = self.opts.outfile[0] if len(self.opts.outfile) == 1 else self.opts.outfile[outfile_idx]
    elif self.opts.infile:
        # Input file name(s) have been supplied
        if '://' in infile:
            # Input file is a URL -- generate an output file of the form "_url[n]"
            outfile_element = "_url{}".format(outfile_idx + 1)
        else:
            # derive the element from the input base name, minus its extension
            outfile_element = os.path.basename(infile).rsplit('.', 1)[0]
    else:
        # Doing an input directory to an output directory: preserve the
        # relative sub-path unless the user asked to flatten the tree
        relpath = dirpath[len(self.opts.indir) + 1:] if not self.opts.flatten and self.opts.indir else ''
        outfile_element = os.path.join(relpath, os.path.split(infile)[1][:-len(self.infile_suffix)])

    # outdir (if any) prefixes the element; the suffix is only appended for
    # derived names, never when an explicit outfile was given
    return (os.path.join(self.opts.outdir, outfile_element) if self.opts.outdir else outfile_element) + \
           (self.outfile_suffix if not self.opts.outfile and self.outfile_suffix else '')
Close and exit the connection.
def quit(self):
    """Terminate the FTP session and close the connection politely."""
    try:
        self.ftp.quit()
    except ftplib.Error as err:
        self.unknown("quit connect error: %s" % err)
    else:
        self.logger.debug("quit connect succeed.")
Connect by wmi and run wql.
def query(self, wql):
    """Run a WQL query on a remote host via the ``wmic`` command-line client.

    Builds a wmic invocation from the parsed CLI arguments, executes it,
    strips the banner line, and parses the remaining delimiter-separated
    output into a list of dicts keyed by the header row.

    :param wql: WQL query string to execute
    :return: list of row dicts, or ``self.unknown(...)`` result on failure

    NOTE(review): domain/user/password are passed on the command line and
    are therefore visible in the process list -- confirm acceptable.
    NOTE(review): on Python 3 ``check_output`` returns bytes, and feeding
    bytes lines to ``csv.DictReader`` fails; this presumably targets
    Python 2 (or the output needs decoding) -- verify.
    NOTE(review): the reader delimiter is hard-coded '|' while wmic is
    given ``self.args.delimiter`` -- presumably both default to '|'.
    """
    try:
        self.__wql = ['wmic', '-U',
                      self.args.domain + '\\' + self.args.user + '%' + self.args.password,
                      '//' + self.args.host,
                      '--namespace', self.args.namespace,
                      '--delimiter', self.args.delimiter,
                      wql]
        self.logger.debug("wql: {}".format(self.__wql))
        self.__output = subprocess.check_output(self.__wql)
        self.logger.debug("output: {}".format(self.__output))
        self.logger.debug("wmi connect succeed.")
        # first line is the wmic status banner; the CSV payload follows
        self.__wmi_output = self.__output.splitlines()[1:]
        self.logger.debug("wmi_output: {}".format(self.__wmi_output))
        self.__csv_header = csv.DictReader(self.__wmi_output, delimiter='|')
        self.logger.debug("csv_header: {}".format(self.__csv_header))
        return list(self.__csv_header)
    except subprocess.CalledProcessError as e:
        self.unknown("Connect by wmi and run wql error: %s" % e)
Changes the cached value and updates creation time. Args: value: the new cached value. timeout: time to live for the object in milliseconds Returns: None
def set_value(self, value, timeout):
    """ Changes the cached value and updates creation time.

    Args:
        value: the new cached value.
        timeout: time to live for the object in milliseconds

    Returns: None
    """
    self.value = value
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement with the same
    # monotonic-seconds semantics.  NOTE(review): any code that compares
    # self.expiration must use the same clock source.
    self.expiration = time.perf_counter() * 1000 + timeout
Wrapper for the other log methods decide which one based on the URL parameter.
def log(self, url=None, credentials=None, do_verify_certificate=True):
    """
    Dispatch to the matching ``log_*`` backend based on the URL scheme:
    file:// -> log_file, http(s):// -> log_post, anything else -> stdout.
    """
    target = self.url if url is None else url
    if re.match("file://", target):
        self.log_file(target)
        return
    if re.match("https://", target) or re.match("http://", target):
        self.log_post(target, credentials, do_verify_certificate)
        return
    self.log_stdout()
Write to a local log file
def log_file(self, url=None):
    """
    Append the store's JSON tuples to the local file named by a file:// URL.
    Falls back to ``self.url`` when no URL is given; write failures are
    reported on stdout rather than raised.
    """
    target = url if url is not None else self.url
    path = re.sub("file://", "", target)
    try:
        with open(path, "a") as handle:
            handle.write(str(self.store.get_json_tuples(True)))
    except IOError as e:
        print(e)
        print("Could not write the content to the file..")
Write to a remote host via HTTP POST
def log_post(self, url=None, credentials=None, do_verify_certificate=True):
    """
    POST the store's JSON content to a remote host.

    Falls back to instance-level url/credentials when the arguments are
    None.  When the credentials dict carries a precomputed "base64" entry
    (see register_credentials) it is sent as an HTTP Basic auth header.

    :param url: target URL (defaults to self.url)
    :param credentials: dict, optionally containing a "base64" token
    :param do_verify_certificate: forwarded to requests' ``verify=``

    NOTE(review): ``httplib`` is the Python 2 module name (``http.client``
    on Python 3) -- confirm the intended interpreter version.
    """
    if url is None:
        url = self.url
    if credentials is None:
        credentials = self.credentials
    # NOTE(review): unreachable through the default value of True -- only
    # triggers when a caller explicitly passes do_verify_certificate=None
    if do_verify_certificate is None:
        do_verify_certificate = self.do_verify_certificate
    if credentials and "base64" in credentials:
        headers = {"Content-Type": "application/json", \
            'Authorization': 'Basic %s' % credentials["base64"]}
    else:
        headers = {"Content-Type": "application/json"}
    try:
        request = requests.post(url, headers=headers, \
            data=self.store.get_json(), verify=do_verify_certificate)
    except httplib.IncompleteRead as e:
        # keep whatever partial response did arrive
        request = e.partial
Helper method to store username and password
def register_credentials(self, credentials=None, user=None, user_file=None, password=None, password_file=None): """ Helper method to store username and password """ # lets store all kind of credential data into this dict if credentials is not None: self.credentials = credentials else: self.credentials = {} # set the user from CLI or file if user: self.credentials["user"] = user elif user_file: with open(user_file, "r") as of: # what would the file entry look like? pattern = re.compile("^user: ") for l in of: if re.match(pattern, l): # strip away the newline l = l[0:-1] self.credentials["user"] = re.sub(pattern, "", l) # remove any surrounding quotes if self.credentials["user"][0:1] == '"' and \ self.credentials["user"][-1:] == '"': self.credentials["user"] = self.credentials["user"][1:-1] # set the password from CLI or file if password: self.credentials["password"] = password elif password_file: with open(password_file, "r") as of: # what would the file entry look like? pattern = re.compile("^password: ") for l in of: if re.match(pattern, l): # strip away the newline l = l[0:-1] self.credentials["password"] = \ re.sub(pattern, "", l) # remove any surrounding quotes if self.credentials["password"][0:1] == '"' and \ self.credentials["password"][-1:] == '"': self.credentials["password"] = \ self.credentials["password"][1:-1] # if both user and password is set, # 1. encode to base 64 for basic auth if "user" in self.credentials and "password" in self.credentials: c = self.credentials["user"] + ":" + self.credentials["password"] self.credentials["base64"] = b64encode(c.encode()).decode("ascii")
Wrap a generator function so that it returns a list when called. For example: # Define a generator >>> def mygen ( n ):... i = 0... while i < n:... yield i... i + = 1 # This is how it might work >>> generator = mygen ( 5 ) >>> generator. next () 0 >>> generator. next () 1 # Wrap it in generator_to_list and it will behave differently. >>> mygen = generator_to_list ( mygen ) >>> mygen ( 5 ) [ 0 1 2 3 4 ]
def generator_to_list(function):
    """
    Wrap a generator function so that it returns a list when called.

    For example:

        # Define a generator
        >>> def mygen(n):
        ...     i = 0
        ...     while i < n:
        ...         yield i
        ...         i += 1

        # This is how it might work
        >>> generator = mygen(5)
        >>> next(generator)
        0
        >>> next(generator)
        1

        # Wrap it in generator_to_list, and it will behave differently.
        >>> mygen = generator_to_list(mygen)
        >>> mygen(5)
        [0, 1, 2, 3, 4]
    """
    # functools.wraps copies __name__ and __doc__ like the old manual
    # assignments did, plus __module__, __qualname__ and __wrapped__.
    import functools

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        return list(function(*args, **kwargs))
    return wrapper
Return the next available filename for a particular filename prefix. For example: >>> import os # Make three ( empty ) files in a directory >>> fp0 = open ( file. 0 w ) >>> fp1 = open ( file. 1 w ) >>> fp2 = open ( file. 2 w ) >>> fp0. close () fp1. close () fp2. close () ( None None None ) # Use logrotate to get the next available filename. >>> logrotate ( file ) file. 3 >>> logrotate ( file. 2 ) file. 3 >>> logrotate ( file. 1 ) file. 3 This can be used to get the next available filename for logging allowing you to rotate log files without using Python s logging module.
def logrotate(filename):
    """
    Return the next available filename for a particular filename prefix.

    Given 'file' (or 'file.N') and existing files 'file.0'..'file.2',
    returns 'file.3' -- the first numbered name that does not yet exist.
    Useful for rotating log files without Python's ``logging`` module.
    """
    match = re.match(r'(.*)' + re.escape(os.path.extsep) + r'(\d+)', filename)
    if not os.path.exists(filename):
        # nothing to rotate past: a numbered name is already free,
        # an unnumbered one gets a '.0' suffix appended
        return filename if match else logrotate(os.path.extsep.join((filename, '0')))
    if match:
        prefix, digits = match.groups()
        counter = int(digits)
        candidate = os.path.extsep.join((prefix, str(counter)))
        while os.path.exists(candidate):
            counter += 1
            candidate = os.path.extsep.join((prefix, str(counter)))
        return candidate
    return logrotate(os.path.extsep.join((filename, '0')))
Set connection parameters. Call set_connection with no arguments to clear.
def set_connection(host=None, database=None, user=None, password=None):
    """Store database connection parameters; calling with no arguments clears them."""
    for key, value in (('HOST', host), ('DATABASE', database),
                       ('USER', user), ('PASSWORD', password)):
        c.CONNECTION[key] = value
Set delegate parameters. Call set_delegate with no arguments to clear.
def set_delegate(address=None, pubkey=None, secret=None):
    """Store delegate parameters; calling with no arguments clears them."""
    for key, value in (('ADDRESS', address), ('PUBKEY', pubkey),
                       ('PASSPHRASE', secret)):
        c.DELEGATE[key] = value
returns a list of named tuples of all transactions relevant to a specific delegates voters. Flow: finds all voters and unvoters SELECTs all transactions of those voters names all transactions according to the scheme: transaction id amount timestamp recipientId senderId rawasset type fee blockId
def get_transactionlist(delegate_pubkey):
    """Return a list of named tuples of all transactions relevant to a
    specific delegate's voters.

    Flow: finds all voters and unvoters, SELECTs all transactions of those
    voters, and names each transaction according to the scheme:
    'transaction', 'id amount timestamp recipientId senderId rawasset type fee'
    (blockId is selected but not carried into the namedtuple).

    NOTE(review): delegate_pubkey is interpolated into the SQL via
    str.format -- only safe for trusted, internally generated pubkeys.
    """
    res = DbCursor().execute_and_fetchall("""
        SELECT transactions."id", transactions."amount",
               blocks."timestamp", transactions."recipientId",
               transactions."senderId", transactions."rawasset",
               transactions."type", transactions."fee", transactions."blockId"
        FROM transactions
        INNER JOIN blocks
          ON transactions."blockId" = blocks.id
        WHERE transactions."senderId" IN
          (SELECT transactions."recipientId"
           FROM transactions, votes
           WHERE transactions."id" = votes."transactionId"
           AND votes."votes" = '+{0}')
        OR transactions."recipientId" IN
          (SELECT transactions."recipientId"
           FROM transactions, votes
           WHERE transactions."id" = votes."transactionId"
           AND votes."votes" = '+{0}')
        ORDER BY blocks."timestamp" ASC;""".format(delegate_pubkey))

    Transaction = namedtuple(
        'transaction',
        'id amount timestamp recipientId senderId rawasset type fee')
    named_transactions = []

    for i in res:
        tx_id = Transaction(
            id=i[0],
            amount=i[1],
            timestamp=i[2],
            recipientId=i[3],
            senderId=i[4],
            rawasset=i[5],
            type=i[6],
            fee=i[7],
        )
        named_transactions.append(tx_id)
    return named_transactions
returns a list of named tuples of all transactions relevant to a specific delegates voters. Flow: finds all voters and unvoters SELECTs all transactions of those voters names all transactions according to the scheme: transaction id amount timestamp recipientId senderId rawasset type fee blockId
def get_events(delegate_pubkey):
    """Return a list of named tuples of all events (transactions AND forged
    blocks) relevant to a specific delegate's voters, ordered by timestamp.

    The UNION merges ordinary voter transactions with blocks forged by
    voters (represented with sender NULL and the sentinel type 100),
    named according to the scheme:
    'Event', 'id amount fee timestamp recipientId senderId type raw'

    NOTE(review): delegate_pubkey is interpolated into the SQL via
    str.format -- only safe for trusted, internally generated pubkeys.
    """
    res = DbCursor().execute_and_fetchall("""
        SELECT * FROM(
          SELECT transactions."id",
                 transactions."amount",
                 transactions."fee",
                 blocks."timestamp",
                 transactions."recipientId",
                 transactions."senderId",
                 transactions."type",
                 transactions."rawasset"
          FROM transactions
          INNER JOIN blocks
            ON transactions."blockId" = blocks.id
          WHERE transactions."senderId" IN
            (SELECT transactions."recipientId"
             FROM transactions, votes
             WHERE transactions."id" = votes."transactionId"
             AND votes."votes" = '+{0}')
          OR transactions."recipientId" IN
            (SELECT transactions."recipientId"
             FROM transactions, votes
             WHERE transactions."id" = votes."transactionId"
             AND votes."votes" = '+{0}')
          UNION
          SELECT blocks."id",
                 blocks."reward",
                 blocks."totalFee",
                 blocks."timestamp",
                 mem_accounts."address",
                 NULL,
                 100,
                 blocks."rawtxs"
          FROM blocks
          INNER JOIN mem_accounts
            ON mem_accounts."publicKey" = blocks."generatorPublicKey"
          WHERE mem_accounts."address" IN
            (SELECT transactions."recipientId"
             FROM transactions, votes
             WHERE transactions."id" = votes."transactionId"
             AND votes."votes" = '+{0}')) AS events
        ORDER BY events."timestamp";""".format(delegate_pubkey))

    Event = namedtuple(
        'Event',
        'id amount fee timestamp recipientId senderId type raw')
    named_events = []

    for i in res:
        tx_id = Event(
            id=i[0],
            amount=i[1],
            fee=i[2],
            timestamp=i[3],
            recipientId=i[4],
            senderId=i[5],
            type=i[6],
            raw=i[7]
        )
        named_events.append(tx_id)
    return named_events
returns all received transactions between the address and registered delegate accounts ORDER by timestamp ASC.
def payout(address):
    """Return all received transactions between the address and registered
    delegate accounts, ORDER BY timestamp ASC.

    :param address: recipient address whose delegate payouts are listed
    :return: list of ``transaction`` namedtuples
        ('id amount timestamp recipientId senderId rawasset type fee')
    """
    qry = DbCursor().execute_and_fetchall("""
        SELECT DISTINCT transactions."id", transactions."amount",
               transactions."timestamp", transactions."recipientId",
               transactions."senderId", transactions."rawasset",
               transactions."type", transactions."fee"
        FROM transactions, delegates
        WHERE transactions."senderId" IN (
          SELECT transactions."senderId"
          FROM transactions, delegates
          WHERE transactions."id" = delegates."transactionId"
        )
        AND transactions."recipientId" = '{}'
        ORDER BY transactions."timestamp" ASC""".format(address))

    Transaction = namedtuple(
        'transaction',
        'id amount timestamp recipientId senderId rawasset type fee')
    named_transactions = []

    for i in qry:
        tx_id = Transaction(
            id=i[0],
            amount=i[1],
            timestamp=i[2],
            recipientId=i[3],
            senderId=i[4],
            rawasset=i[5],
            type=i[6],
            fee=i[7],
        )
        named_transactions.append(tx_id)
    return named_transactions
Returns a list of namedtuples all votes made by an address { ( +/ - ) pubkeydelegate: timestamp } timestamp DESC
def votes(address):
    """Return a list of namedtuples for all votes made by an address,
    {(+/-)pubkeydelegate:timestamp}, ordered by timestamp DESC.

    Each ``vote`` carries: direction (True for '+', False for '-'),
    the bare delegate pubkey, and the vote transaction's timestamp.

    :raises ParseError: if a vote string starts with neither '+' nor '-'
    """
    qry = DbCursor().execute_and_fetchall("""
        SELECT votes."votes", transactions."timestamp"
        FROM votes, transactions
        WHERE transactions."id" = votes."transactionId"
        AND transactions."senderId" = '{}'
        ORDER BY transactions."timestamp" DESC
    """.format(address))

    Vote = namedtuple(
        'vote',
        'direction delegate timestamp')
    res = []
    for i in qry:
        if i[0][0] == '+':
            direction = True
        elif i[0][0] == '-':
            direction = False
        else:
            logger.fatal('failed to interpret direction for: {}'.format(i))
            raise ParseError('failed to interpret direction of vote for: {}'.format(i))
        vote = Vote(
            direction=direction,
            # strip the leading +/- sign to leave the bare pubkey
            delegate=i[0][1:],
            timestamp=i[1],
        )
        res.append(vote)
    return res
Takes a single address and returns the current balance.
def balance(address):
    """
    Takes a single address and returns the current balance.

    Sums incoming amounts minus outgoing amounts and fees; if the address
    is a registered delegate, its forged block rewards and collected fees
    are added as well.

    :param address: address whose balance to compute
    :return: balance in the chain's smallest unit
    :raises NegativeBalanceError: if the computed balance is negative
    """
    txhistory = Address.transactions(address)
    balance = 0
    for i in txhistory:
        if i.recipientId == address:
            balance += i.amount
        if i.senderId == address:
            balance -= (i.amount + i.fee)

    delegates = Delegate.delegates()
    for i in delegates:
        if address == i.address:
            forged_blocks = Delegate.blocks(i.pubkey)
            for block in forged_blocks:
                balance += (block.reward + block.totalFee)
    if balance < 0:
        height = Node.height()
        # bug fix: the original format string used '{1)' (mismatched brace),
        # which raises ValueError instead of producing the intended message
        logger.fatal('Negative balance for address {0}, Nodeheight: {1}'.format(address, height))
        raise NegativeBalanceError('Negative balance for address {0}, Nodeheight: {1}'.format(address, height))
    return balance
returns a list of named tuples x. timestamp x. amount including block rewards
def balance_over_time(address):
    """Return a list of named tuples (x.timestamp, x.amount) tracing the
    address' balance over time, including block rewards if the address is
    a registered delegate.

    Forged blocks are merged into the transaction timeline in timestamp
    order; any rewards forged after the last transaction are appended at
    the end.

    NOTE(review): ``forged_blocks[block]`` inside the while loop is not
    bounds-checked -- presumably an IndexError cannot occur because a
    delegate always has transactions after its last forged block; verify.
    """
    forged_blocks = None
    txhistory = Address.transactions(address)
    delegates = Delegate.delegates()
    for i in delegates:
        if address == i.address:
            forged_blocks = Delegate.blocks(i.pubkey)
    balance_over_time = []
    balance = 0
    block = 0

    Balance = namedtuple(
        'balance',
        'timestamp amount')

    for tx in txhistory:
        if forged_blocks:
            # credit every forged block that precedes this transaction
            while forged_blocks[block].timestamp <= tx.timestamp:
                balance += (forged_blocks[block].reward + forged_blocks[block].totalFee)
                balance_over_time.append(Balance(timestamp=forged_blocks[block].timestamp, amount=balance))
                block += 1
        if tx.senderId == address:
            balance -= (tx.amount + tx.fee)
            res = Balance(timestamp=tx.timestamp, amount=balance)
            balance_over_time.append(res)
        if tx.recipientId == address:
            balance += tx.amount
            res = Balance(timestamp=tx.timestamp, amount=balance)
            balance_over_time.append(res)

    # credit blocks forged after the final transaction, if any remain
    if forged_blocks and block <= len(forged_blocks) - 1:
        if forged_blocks[block].timestamp > txhistory[-1].timestamp:
            for i in forged_blocks[block:]:
                balance += (i.reward + i.totalFee)
                res = Balance(timestamp=i.timestamp, amount=balance)
                balance_over_time.append(res)
    return balance_over_time
returns a list of named tuples of all delegates. { username: { pubkey: pubkey timestamp: timestamp address: address }}
def delegates():
    """Return a list of named tuples describing every registered delegate:
    'delegate', 'username pubkey timestamp address transactionId'.

    The pubkey is hex-encoded from the raw senderPublicKey bytes of the
    registration transaction.
    """
    qry = DbCursor().execute_and_fetchall("""
        SELECT delegates."username", delegates."transactionId",
               transactions."timestamp", transactions."senderId",
               transactions."senderPublicKey"
        FROM transactions
        JOIN delegates ON transactions."id" = delegates."transactionId"
    """)

    Delegate = namedtuple(
        'delegate',
        'username pubkey timestamp address transactionId')
    res = []
    for i in qry:
        registration = Delegate(
            username=i[0],
            # raw bytes -> hex string
            pubkey=binascii.hexlify(i[4]).decode("utf-8"),
            timestamp=i[2],
            address=i[3],
            transactionId=i[1]
        )
        res.append(registration)
    return res
Assumes that all send transactions from a delegate are payouts. Use blacklist to remove rewardwallet and other transactions if the address is not a voter. blacklist can contain both addresses and transactionIds
def lastpayout(delegate_address, blacklist=None):
    '''
    Assumes that all send transactions from a delegate are payouts.
    Use blacklist to remove rewardwallet and other transactions if
    the address is not a voter. blacklist can contain both addresses
    and transactionIds.

    Returns one ``payout`` namedtuple ('address id timestamp') per
    recipient, holding that recipient's most recent payout.

    NOTE(review): the blacklist is spliced into the SQL via str.format --
    only safe for trusted, internally supplied values.
    '''
    if blacklist and len(blacklist) > 1:
        command_blacklist = 'NOT IN ' + str(tuple(blacklist))
    elif blacklist and len(blacklist) == 1:
        # single element: avoid the trailing comma a 1-tuple repr would add
        command_blacklist = '!= ' + "'" + blacklist[0] + "'"
    else:
        # harmless always-true filter when no blacklist was given
        command_blacklist = "!= 'nothing'"
    qry = DbCursor().execute_and_fetchall("""
        SELECT ts."recipientId", ts."id", ts."timestamp"
        FROM transactions ts,
          (SELECT MAX(transactions."timestamp") AS max_timestamp, transactions."recipientId"
           FROM transactions
           WHERE transactions."senderId" = '{0}'
           AND transactions."id" {1}
           GROUP BY transactions."recipientId") maxresults
        WHERE ts."recipientId" = maxresults."recipientId"
        AND ts."recipientId" {1}
        AND ts."timestamp"= maxresults.max_timestamp;
        """.format(delegate_address, command_blacklist))
    result = []

    Payout = namedtuple(
        'payout',
        'address id timestamp')

    for i in qry:
        payout = Payout(
            address=i[0],
            id=i[1],
            timestamp=i[2]
        )
        result.append(payout)
    return result
returns every address that has voted for a delegate. Current voters can be obtained using voters. ORDER BY timestamp ASC
def votes(delegate_pubkey):
    """Return every address that has ever voted for a delegate, as
    ``voter`` namedtuples ('address timestamp'), ORDER BY timestamp ASC.

    Current voters can be obtained using voters (this list also includes
    addresses that later unvoted).
    """
    qry = DbCursor().execute_and_fetchall("""
        SELECT transactions."recipientId", transactions."timestamp"
        FROM transactions, votes
        WHERE transactions."id" = votes."transactionId"
        AND votes."votes" = '+{}'
        ORDER BY transactions."timestamp" ASC;
    """.format(delegate_pubkey))

    Voter = namedtuple(
        'voter',
        'address timestamp')
    voters = []
    for i in qry:
        voter = Voter(
            address=i[0],
            timestamp=i[1]
        )
        voters.append(voter)
    return voters
returns a list of named tuples of all blocks forged by a delegate. if delegate_pubkey is not specified set_delegate needs to be called in advance. max_timestamp can be configured to retrieve blocks up to a certain timestamp.
def blocks(delegate_pubkey=None, max_timestamp=None):
    """Return a list of named tuples of all blocks forged by a delegate.

    If delegate_pubkey is not specified, set_delegate needs to be called
    in advance (the pubkey is then read from c.DELEGATE).  max_timestamp
    can be configured to retrieve blocks only up to a certain timestamp.

    :return: list of ``block`` namedtuples
        ('timestamp height id totalFee reward') in timestamp ASC order
    """
    if not delegate_pubkey:
        delegate_pubkey = c.DELEGATE['PUBKEY']

    if max_timestamp:
        max_timestamp_sql = """ blocks."timestamp" <= {} AND""".format(max_timestamp)
    else:
        max_timestamp_sql = ''

    # '\\x{1}' renders the pubkey as a Postgres bytea literal (\x<hex>)
    qry = DbCursor().execute_and_fetchall("""
         SELECT blocks."timestamp", blocks."height", blocks."id",
                blocks."totalFee", blocks."reward"
         FROM blocks
         WHERE {0} blocks."generatorPublicKey" = '\\x{1}'
         ORDER BY blocks."timestamp"
         ASC""".format(
        max_timestamp_sql,
        delegate_pubkey))

    Block = namedtuple('block',
                       'timestamp height id totalFee reward')
    block_list = []
    for block in qry:
        block_value = Block(timestamp=block[0],
                            height=block[1],
                            id=block[2],
                            totalFee=block[3],
                            reward=block[4])
        block_list.append(block_value)

    return block_list
Calculate the true blockweight payout share for a given delegate, assuming no exceptions were made for a voter. last_payout is a map of addresses and timestamps: { address: timestamp }. If no arguments are given, it will start the calculation at the first forged block by the delegate, generate a last_payout from transaction history, and use the set_delegate info.
def share(passphrase=None, last_payout=None, start_block=0, del_pubkey=None, del_address=None):
    """Calculate the true blockweight payout share for a given delegate,
    assuming no exceptions were made for a voter.

    last_payout is a map of addresses and timestamps: {address: timestamp}.
    If no arguments are given, it will start the calculation at the first
    forged block by the delegate, generate a last_payout from transaction
    history, and use the set_delegate info.

    If a passphrase is provided, it is only used to generate the adddress
    and keys, no transactions are sent. (Still not recommended unless you
    know what you are doing, version control could store your passphrase
    for example; very risky)

    :return: (voter_dict, max_timestamp) -- per-address accounting state
    """
    logger.info('starting share calculation using settings: {0} {1}'.format(c.DELEGATE, c.CALCULATION_SETTINGS))

    delegate_pubkey = c.DELEGATE['PUBKEY']
    delegate_address = c.DELEGATE['ADDRESS']

    if del_pubkey and del_address:
        delegate_address = del_address
        delegate_pubkey = del_pubkey

    logger.info('Starting share calculation, using address:{0}, pubkey:{1}'.format(delegate_address, delegate_pubkey))

    max_timestamp = Node.max_timestamp()
    logger.info('Share calculation max_timestamp = {}'.format(max_timestamp))

    # utils function
    transactions = get_transactionlist(
        delegate_pubkey=delegate_pubkey
    )

    votes = Delegate.votes(delegate_pubkey)

    # create a map of voters with zeroed accounting state
    voter_dict = {}
    for voter in votes:
        voter_dict.update({voter.address: {
            'balance': 0.0,
            'status': False,
            'last_payout': voter.timestamp,
            'share': 0.0,
            'vote_timestamp': voter.timestamp,
            'blocks_forged': []}
        })

    # check if a voter is/used to be a forging delegate; their own forged
    # blocks are credited to their balance as the calculation progresses
    delegates = Delegate.delegates()
    for i in delegates:
        if i.address in voter_dict:
            logger.info('A registered delegate is a voter: {0}, {1}, {2}'.format(i.username, i.address, i.pubkey))
            try:
                blocks_by_voter = Delegate.blocks(i.pubkey)
                voter_dict[i.address]['blocks_forged'].extend(Delegate.blocks(i.pubkey))
                logger.info('delegate {0} has forged {1} blocks'.format(i.username, len(blocks_by_voter)))
            except Exception:
                logger.info('delegate {} has not forged any blocks'.format(i))
                pass

    # drop blacklisted addresses entirely before any accounting
    try:
        for i in c.CALCULATION_SETTINGS['BLACKLIST']:
            voter_dict.pop(i)
            logger.debug('popped {} from calculations'.format(i))
    except Exception:
        pass

    # last_payout may be: absent (derive from tx history), a single int
    # cutoff, or an explicit {address: timestamp} style map
    if not last_payout:
        last_payout = Delegate.lastpayout(delegate_address)
        for payout in last_payout:
            try:
                voter_dict[payout.address]['last_payout'] = payout.timestamp
            except Exception:
                pass
    elif type(last_payout) is int:
        # NOTE(review): this branch iterates dict KEYS (strings) and
        # subscripts them -- presumably dead/untested code; verify
        for address in voter_dict:
            if address['vote_timestamp'] < last_payout:
                address['last_payout'] = last_payout
    elif type(last_payout) is dict:
        for payout in last_payout:
            try:
                voter_dict[payout.address]['last_payout'] = payout.timestamp
            except Exception:
                pass
    else:
        logger.fatal('last_payout object not recognised: {}'.format(type(last_payout)))
        raise InputError('last_payout object not recognised: {}'.format(type(last_payout)))

    # get all forged blocks of delegate:
    blocks = Delegate.blocks(max_timestamp=max_timestamp,
                             delegate_pubkey=delegate_pubkey)

    block_nr = start_block
    chunk_dict = {}
    reuse = False
    try:
        for tx in transactions:
            # distribute the pending blocks that precede this transaction;
            # while balances are unchanged (reuse), the previous per-block
            # share split (chunk_dict) is applied again without recomputing
            while tx.timestamp > blocks[block_nr].timestamp:
                if reuse:
                    block_nr += 1
                    for x in chunk_dict:
                        voter_dict[x]['share'] += chunk_dict[x]
                    continue
                block_nr += 1
                poolbalance = 0
                chunk_dict = {}
                # first pass: cap balances and total up the pool weight
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    try:
                        if voter_dict[i]['balance'] > c.CALCULATION_SETTINGS['MAX']:
                            balance = c.CALCULATION_SETTINGS['MAX']
                    except Exception:
                        pass
                    try:
                        if balance > c.CALCULATION_SETTINGS['EXCEPTIONS'][i]['REPLACE']:
                            balance = c.CALCULATION_SETTINGS['EXCEPTIONS'][i]['REPLACE']
                    except Exception:
                        pass
                    # credit any blocks this voter forged before the payout block
                    try:
                        for x in voter_dict[i]['blocks_forged']:
                            if x.timestamp < blocks[block_nr].timestamp:
                                voter_dict[i]['balance'] += (x.reward + x.totalFee)
                                voter_dict[i]['blocks_forged'].remove(x)
                        balance = voter_dict[i]['balance']
                    except Exception:
                        pass
                    if voter_dict[i]['status']:
                        # small negative drift is tolerated up to 20 ARK
                        if not voter_dict[i]['balance'] < -20 * c.ARK:
                            poolbalance += balance
                        else:
                            logger.fatal('balance lower than zero for: {0}'.format(i))
                            raise NegativeBalanceError('balance lower than zero for: {0}'.format(i))
                # second pass: hand out this block's reward pro rata
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    if voter_dict[i]['balance'] > c.CALCULATION_SETTINGS['MAX']:
                        balance = c.CALCULATION_SETTINGS['MAX']
                    try:
                        if balance > c.CALCULATION_SETTINGS['EXCEPTIONS'][i]['REPLACE']:
                            balance = c.CALCULATION_SETTINGS['EXCEPTIONS'][i]['REPLACE']
                    except Exception:
                        pass
                    if voter_dict[i]['status'] and voter_dict[i]['last_payout'] < blocks[block_nr].timestamp:
                        if c.CALCULATION_SETTINGS['SHARE_FEES']:
                            share = (balance/poolbalance) * (blocks[block_nr].reward +
                                                             blocks[block_nr].totalFee)
                        else:
                            share = (balance/poolbalance) * blocks[block_nr].reward
                        voter_dict[i]['share'] += share
                        chunk_dict.update({i: share})
                reuse = True

            # parsing a transaction
            minvote = '{{"votes":["-{0}"]}}'.format(delegate_pubkey)
            plusvote = '{{"votes":["+{0}"]}}'.format(delegate_pubkey)
            # balances changed, so the cached chunk split is invalidated
            reuse = False

            if tx.recipientId in voter_dict:
                voter_dict[tx.recipientId]['balance'] += tx.amount
            if tx.senderId in voter_dict:
                voter_dict[tx.senderId]['balance'] -= (tx.amount + tx.fee)
            if tx.senderId in voter_dict and tx.type == 3 and plusvote in tx.rawasset:
                voter_dict[tx.senderId]['status'] = True
            if tx.senderId in voter_dict and tx.type == 3 and minvote in tx.rawasset:
                voter_dict[tx.senderId]['status'] = False

        # blocks forged after the last transaction reuse the final split
        remaining_blocks = len(blocks) - block_nr - 1
        for i in range(remaining_blocks):
            for x in chunk_dict:
                voter_dict[x]['share'] += chunk_dict[x]

    # an IndexError occurs if max(transactions.timestamp) > max(blocks.timestamp)
    # This means we parsed every block
    except IndexError:
        pass

    for i in voter_dict:
        logger.info("{0}  {1}  {2}  {3}  {4}".format(i,
                                                     voter_dict[i]['share'],
                                                     voter_dict[i]['status'],
                                                     voter_dict[i]['last_payout'],
                                                     voter_dict[i]['vote_timestamp']))
    return voter_dict, max_timestamp
Legacy TBW script (still pretty performant, but has some quirky behavior when forging delegates are amongst your voters)
def dep_trueshare(start_block=0, del_pubkey=None, del_address=None, blacklist=None, share_fees=False, max_weight=float('inf'), raiseError=True):
    '''
    Legacy TBW script (still pretty performant, but has some quirky behavior
    when forging delegates are amongst your voters).

    Replays all transactions against all blocks forged by the delegate and
    accrues a pro-rata share of each block's reward to every voter according
    to its balance at the time the block was forged.

    :param int start_block: block from which we start adding to the share
        (we calculate balances from block 0 anyways)
    :param str del_pubkey: delegate public key as is presented in the ark wallet
    :param str del_address: delegate address
    :param list blacklist: blacklist for addresses to be removed BEFORE
        calculation. Their share is removed from the pool balance
    :param bool share_fees: if tx fees should be shared as well.
    :param float max_weight: max_balance of a voter
    :param bool raiseError: if True, a voter balance below the tolerance
        raises NegativeBalanceError; if False the voter is silently skipped
    :returns: (voter_dict, max_timestamp) tuple
    '''
    delegate_pubkey = c.DELEGATE['PUBKEY']
    delegate_address = c.DELEGATE['ADDRESS']
    # both overrides must be supplied together; otherwise the delegate
    # configured in settings is used
    if del_pubkey and del_address:
        delegate_address = del_address
        delegate_pubkey = del_pubkey

    max_timestamp = Node.max_timestamp()

    # utils function
    transactions = get_transactionlist(
        delegate_pubkey=delegate_pubkey
    )

    votes = Delegate.votes(delegate_pubkey)

    # create a map of voters
    voter_dict = {}
    for voter in votes:
        voter_dict.update({voter.address: {
            'balance': 0.0,
            'status': False,
            'last_payout': voter.timestamp,
            'share': 0.0,
            'vote_timestamp': voter.timestamp,
            'blocks_forged': []}
        })

    # remove blacklisted addresses before any share calculation
    # (blacklist may be None, hence the broad try/except)
    try:
        for i in blacklist:
            voter_dict.pop(i)
    except Exception:
        pass

    # check if a voter is/used to be a forging delegate
    delegates = Delegate.delegates()
    for i in delegates:
        if i.address in voter_dict:
            try:
                blocks_by_voter = Delegate.blocks(i.pubkey)
                voter_dict[i.address]['blocks_forged'].extend(Delegate.blocks(i.pubkey))
            except Exception:
                pass

    # record when each voter was last paid, so already-paid blocks are not
    # shared again
    last_payout = Delegate.lastpayout(delegate_address)
    for payout in last_payout:
        try:
            voter_dict[payout.address]['last_payout'] = payout.timestamp
        except Exception:
            pass

    blocks = Delegate.blocks(delegate_pubkey)
    block_nr = start_block
    chunk_dict = {}
    reuse = False
    # the IndexError raised when block_nr runs past the last forged block
    # terminates the replay (see except clause below)
    try:
        for tx in transactions:
            # apply every block forged before this transaction's timestamp
            while tx.timestamp > blocks[block_nr].timestamp:
                # balances did not change since the previous block, so the
                # previously computed share chunk can simply be reused
                if reuse:
                    block_nr += 1
                    for x in chunk_dict:
                        voter_dict[x]['share'] += chunk_dict[x]
                    continue
                block_nr += 1
                poolbalance = 0
                chunk_dict = {}
                # first pass: determine the total pool balance for this block
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    # cap the weight of any single voter
                    if balance > max_weight:
                        balance = max_weight
                    # checks if a delegate that votes for us has forged
                    # blocks in the mean time and credits those rewards
                    try:
                        for x in voter_dict[i]['blocks_forged']:
                            if x.timestamp < blocks[block_nr].timestamp:
                                voter_dict[i]['balance'] += (x.reward + x.totalFee)
                                voter_dict[i]['blocks_forged'].remove(x)
                        balance = voter_dict[i]['balance']
                    except Exception:
                        pass
                    if voter_dict[i]['status']:
                        # tolerate slightly negative balances (up to 20 ARK)
                        # -- presumably bookkeeping noise; larger deficits
                        # are an error
                        if not voter_dict[i]['balance'] < -20 * c.ARK:
                            poolbalance += balance
                        else:
                            if raiseError:
                                raise NegativeBalanceError('balance lower than zero for: {0}'.format(i))
                            pass
                # second pass: distribute this block's reward pro rata over
                # all active voters not yet paid for this block
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    if voter_dict[i]['status'] and voter_dict[i]['last_payout'] < blocks[block_nr].timestamp:
                        if share_fees:
                            share = (balance / poolbalance) * (blocks[block_nr].reward + blocks[block_nr].totalFee)
                        else:
                            share = (balance / poolbalance) * blocks[block_nr].reward
                        voter_dict[i]['share'] += share
                        chunk_dict.update({i: share})
                reuse = True

            # parsing a transaction
            minvote = '{{"votes":["-{0}"]}}'.format(delegate_pubkey)
            plusvote = '{{"votes":["+{0}"]}}'.format(delegate_pubkey)
            # this transaction changes balances, so the chunk is stale
            reuse = False

            if tx.recipientId in voter_dict:
                voter_dict[tx.recipientId]['balance'] += tx.amount
            if tx.senderId in voter_dict:
                voter_dict[tx.senderId]['balance'] -= (tx.amount + tx.fee)
            if tx.senderId in voter_dict and tx.type == 3 and plusvote in tx.rawasset:
                voter_dict[tx.senderId]['status'] = True
            if tx.senderId in voter_dict and tx.type == 3 and minvote in tx.rawasset:
                voter_dict[tx.senderId]['status'] = False

        # blocks forged after the final transaction: balances are frozen, so
        # the last chunk applies to each remaining block
        remaining_blocks = len(blocks) - block_nr - 1
        for i in range(remaining_blocks):
            for x in chunk_dict:
                voter_dict[x]['share'] += chunk_dict[x]
    # an IndexError occurs if max(transactions.timestamp) >
    # max(blocks.timestamp). This means we parsed every block
    except IndexError:
        pass

    for i in voter_dict:
        logger.info("{0} {1} {2} {3} {4}".format(
            i,
            voter_dict[i]['share'],
            voter_dict[i]['status'],
            voter_dict[i]['last_payout'],
            voter_dict[i]['vote_timestamp']))
    return voter_dict, max_timestamp
Legacy TBW script (still pretty performant, but has some quirky behavior when forging delegates are amongst your voters).
def trueshare(start_block=0, del_pubkey=None, del_address=None, blacklist=None, share_fees=False, max_weight=float('inf')):
    '''
    Legacy TBW script (still pretty performant, but has some quirky behavior
    when forging delegates are amongst your voters).

    Event-based variant: consumes a unified event stream (transactions plus
    type-100 "forged block" events) instead of raw transactions.

    :param int start_block: block from which we start adding to the share
        (we calculate balances from block 0 anyways)
    :param str del_pubkey: delegate public key as is presented in the ark wallet
    :param str del_address: delegate address
    :param list blacklist: blacklist for addresses to be removed BEFORE
        calculation. Their share is removed from the pool balance
    :param bool share_fees: if tx fees should be shared as well.
    :param float max_weight: max_balance of a voter
    :returns: (voter_dict, max_timestamp) tuple
    '''
    delegate_pubkey = c.DELEGATE['PUBKEY']
    delegate_address = c.DELEGATE['ADDRESS']
    # both overrides must be supplied together; otherwise the delegate
    # configured in settings is used
    if del_pubkey and del_address:
        delegate_address = del_address
        delegate_pubkey = del_pubkey

    max_timestamp = Node.max_timestamp()

    # utils function
    events = get_events(
        delegate_pubkey=delegate_pubkey
    )

    votes = Delegate.votes(delegate_pubkey)

    # create a map of voters
    voter_dict = {}
    for voter in votes:
        voter_dict.update({voter.address: {
            'balance': 0.0,
            'status': False,
            'last_payout': voter.timestamp,
            'share': 0.0,
            'vote_timestamp': voter.timestamp,
            'blocks_forged': []}
        })

    # remove blacklisted addresses before any share calculation
    # (blacklist may be None, hence the broad try/except)
    try:
        for i in blacklist:
            voter_dict.pop(i)
    except Exception:
        pass

    # record when each voter was last paid, so already-paid blocks are not
    # shared again
    last_payout = Delegate.lastpayout(delegate_address)
    for payout in last_payout:
        try:
            voter_dict[payout.address]['last_payout'] = payout.timestamp
        except Exception:
            pass

    blocks = Delegate.blocks(delegate_pubkey)
    block_nr = start_block
    chunk_dict = {}
    reuse = False
    # the IndexError raised when block_nr runs past the last forged block
    # terminates the replay (see except clause below)
    try:
        for e in events:
            # apply every block forged before this event's timestamp
            while e.timestamp > blocks[block_nr].timestamp:
                # balances did not change since the previous block, so the
                # previously computed share chunk can simply be reused
                if reuse:
                    block_nr += 1
                    for x in chunk_dict:
                        voter_dict[x]['share'] += chunk_dict[x]
                    continue
                block_nr += 1
                poolbalance = 0
                chunk_dict = {}
                # first pass: determine the total pool balance for this block
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    # cap the weight of any single voter
                    if balance > max_weight:
                        balance = max_weight
                    if voter_dict[i]['status']:
                        if voter_dict[i]['balance'] >= 0:
                            poolbalance += balance
                        else:
                            print(voter_dict[i])
                            raise NegativeBalanceError('balance lower than zero for: {0}'.format(i))
                # second pass: distribute this block's reward pro rata over
                # all active voters not yet paid for this block
                for i in voter_dict:
                    balance = voter_dict[i]['balance']
                    if voter_dict[i]['status'] and voter_dict[i]['last_payout'] < blocks[block_nr].timestamp:
                        if share_fees:
                            share = (balance / poolbalance) * (blocks[block_nr].reward + blocks[block_nr].totalFee)
                        else:
                            share = (balance / poolbalance) * blocks[block_nr].reward
                        voter_dict[i]['share'] += share
                        chunk_dict.update({i: share})
                reuse = True

            # parsing a transaction
            minvote = '{{"votes":["-{0}"]}}'.format(delegate_pubkey)
            plusvote = '{{"votes":["+{0}"]}}'.format(delegate_pubkey)
            # this event changes balances, so the chunk is stale
            reuse = False

            # type 100 is a forged block
            if e.type != 100:
                if e.recipientId in voter_dict:
                    voter_dict[e.recipientId]['balance'] += e.amount
                if e.senderId in voter_dict:
                    voter_dict[e.senderId]['balance'] -= (e.amount + e.fee)
                if e.senderId in voter_dict and e.type == 3 and plusvote in e.raw:
                    voter_dict[e.senderId]['status'] = True
                if e.senderId in voter_dict and e.type == 3 and minvote in e.raw:
                    voter_dict[e.senderId]['status'] = False
            elif e.type == 100:
                # credit the block reward + fees to the forging voter
                if e.recipientId in voter_dict:
                    voter_dict[e.recipientId]['balance'] += e.amount + e.fee

        # blocks forged after the final event: balances are frozen, so the
        # last chunk applies to each remaining block
        remaining_blocks = len(blocks) - block_nr - 1
        for i in range(remaining_blocks):
            for x in chunk_dict:
                voter_dict[x]['share'] += chunk_dict[x]
    # an IndexError occurs once every forged block has been parsed
    except IndexError:
        pass

    for i in voter_dict:
        logger.info("{0} {1} {2} {3} {4}".format(
            i,
            voter_dict[i]['share'],
            voter_dict[i]['status'],
            voter_dict[i]['last_payout'],
            voter_dict[i]['vote_timestamp']))
    return voter_dict, max_timestamp
Adds a source to the factory provided its type and constructor arguments :param source_class: The class used to instantiate the source :type source_class: type :param constructor_args: Arguments to be passed into the constructor :type constructor_args: Iterable
def add_source(self, source_class, *constructor_args):
    """
    Adds a source to the factory provided its type and constructor arguments

    :param source_class: The class used to instantiate the source
    :type source_class: type
    :param constructor_args: Arguments to be passed into the constructor
    :type constructor_args: Iterable
    :raises TypeError: if ``source_class`` does not implement IIPSource
    """
    if not IIPSource.implementedBy(source_class):
        raise TypeError("source_class {} must implement IIPSource".format(source_class))
    # store as a (class, args) tuple so get_sources() can lazily instantiate
    self._sources.add((source_class, constructor_args))
Generates instantiated sources from the factory :param limit: the max number of sources to yield :type limit: int :param types_list: filter by types so the constructor can be used to accommodate many types :type types_list: class or list of classes :return: Yields types added by add_source :rtype: generator
def get_sources(self, limit=sys.maxsize, types_list=None):
    """
    Generates instantiated sources from the factory

    :param limit: the max number of sources to yield
    :type limit: int
    :param types_list: filter by types so the constructor can be used to
        accommodate many types
    :type types_list: class or list of classes
    :return: Yields types added by add_source
    :rtype: generator
    """
    # Bug fix: previously a non-positive limit still yielded one source
    # because the limit was checked only after the first yield.
    if limit <= 0:
        return

    # allow a single class to be passed without wrapping it in a list
    if types_list and not isinstance(types_list, (tuple, list)):
        types_list = [types_list]

    sources = list(self._sources)
    random.shuffle(sources)

    for source_class, constructor_args in sources:
        if not types_list or source_class in types_list:
            yield source_class(*constructor_args)
            limit -= 1
            if limit <= 0:
                break
Massages the true and false strings to bool equivalents.
def value_to_bool(config_val, evar):
    """
    Massages the 'true' and 'false' strings to bool equivalents.

    :param str config_val: The env var value.
    :param EnvironmentVariable evar: The EVar object we are validating
        a value for.
    :rtype: bool
    :return: True or False, depending on the value.
    """
    # Empty/None values and anything other than (case-insensitive,
    # whitespace-tolerant) "true" map to False.
    return bool(config_val) and config_val.strip().lower() == 'true'
If the value is None fail validation.
def validate_is_not_none(config_val, evar):
    """
    If the value is ``None``, fail validation.

    :param str config_val: The env var value.
    :param EnvironmentVariable evar: The EVar object we are validating
        a value for.
    :raises: ValueError if the config value is None.
    """
    if config_val is not None:
        return config_val
    raise ValueError(
        "Value for environment variable '{evar_name}' can't "
        "be empty.".format(evar_name=evar.name))
Make sure the value evaluates to boolean True.
def validate_is_boolean_true(config_val, evar):
    """
    Make sure the value evaluates to boolean True.

    :param str config_val: The env var value.
    :param EnvironmentVariable evar: The EVar object we are validating
        a value for.
    :raises: ValueError if the config value evaluates to boolean False.
    """
    # Bug fix: this previously only rejected None, so an empty string
    # slipped through even though the error message (and this function's
    # name) promise that falsy values fail validation.
    if not config_val:
        raise ValueError(
            "Value for environment variable '{evar_name}' can't "
            "be empty.".format(evar_name=evar.name))
    return config_val
Convert an evar value into a Python logging level constant.
def value_to_python_log_level(config_val, evar):
    """
    Convert an evar value into a Python logging level constant.

    :param str config_val: The env var value.
    :param EnvironmentVariable evar: The EVar object we are validating
        a value for.
    :return: A Python logging level constant (int).
    :raises: ValueError if the log level is invalid.
    """
    if not config_val:
        config_val = evar.default_val
    config_val = config_val.upper()
    # logging.getLevelName() maps a level *name* back to its numeric value;
    # for unknown names it returns the string 'Level <name>'. Using it here
    # avoids logging._checkLevel(), a private API that may change or
    # disappear between Python versions.
    level = logging.getLevelName(config_val)
    if not isinstance(level, int):
        raise ValueError("Unknown level: %r" % config_val)
    return level
Register a new range type as a PostgreSQL range.
def register_range_type(pgrange, pyrange, conn):
    """
    Register a new range type as a PostgreSQL range.

        >>> register_range_type("int4range", intrange, conn)

    The above will make sure intrange is regarded as an int4range for queries
    and that int4ranges will be cast into intrange when fetching rows.

    pgrange should be the full name including schema for the custom range
    type.

    Note that adaption is global, meaning if a range type is passed to a
    regular psycopg2 connection it will adapt it to its proper range type.
    Parsing of rows from the database however is not global and just set on
    a per connection basis.
    """
    # adaptation (python -> SQL) is registered globally for the type
    register_adapter(pyrange, partial(adapt_range, pgrange))
    # casting (SQL -> python) needs the type's OIDs and is per-connection
    oids = query_range_oids(pgrange, conn)
    register_range_caster(pgrange, pyrange, *oids, scope=conn)
Acquires the correct error for a given response.
def get_api_error(response):
    """Acquires the correct error for a given response.

    :param requests.Response response: HTTP error response
    :returns: the appropriate error for a given response
    :rtype: APIError
    """
    # fall back to the generic APIError when no specialised class is
    # registered for this status code
    return _status_code_to_class.get(response.status_code, APIError)(response)
Converts the request parameters to Python.
def get_param_values(request, model=None):
    """
    Converts the request parameters to Python.

    :param request: <pyramid.request.Request> || <dict>
    :param model: <orb.Model> || None

    :return: <dict>
    """
    # a pre-parsed dict passes straight through (isinstance instead of the
    # former `type(request) == dict` so dict subclasses also qualify)
    if isinstance(request, dict):
        return request

    params = get_payload(request)

    # support in-place editing formatted request
    try:
        del params['pk']
        params[params.pop('name')] = params.pop('value')
    except KeyError:
        pass

    # strip trailing "[]" from array-style keys and evaluate each value
    return {
        k.rstrip('[]'): safe_eval(v) if not isinstance(v, list) else [safe_eval(sv) for sv in v]
        for k, v in params.items()
    }
Extracts ORB context information from the request.
def get_context(request, model=None): """ Extracts ORB context information from the request. :param request: <pyramid.request.Request> :param model: <orb.Model> || None :return: {<str> key: <variant> value} values, <orb.Context> """ # convert request parameters to python param_values = get_param_values(request, model=model) # extract the full orb context if provided context = param_values.pop('orb_context', {}) if isinstance(context, (unicode, str)): context = projex.rest.unjsonify(context) # otherwise, extract the limit information has_limit = 'limit' in context or 'limit' in param_values # create the new orb context orb_context = orb.Context(**context) # build up context information from the request params used = set() query_context = {} for key in orb.Context.Defaults: if key in param_values: used.add(key) query_context[key] = param_values.get(key) # generate a simple query object schema_values = {} if model: # extract match dict items for key, value in request.matchdict.items(): if model.schema().column(key, raise_=False): schema_values[key] = value # extract payload items for key, value in param_values.items(): root_key = key.split('.')[0] schema_object = model.schema().column(root_key, raise_=False) or model.schema().collector(root_key) if schema_object: value = param_values.pop(key) if isinstance(schema_object, orb.Collector) and type(value) not in (tuple, list): value = [value] schema_values[key] = value # generate the base context information query_context['scope'] = { 'request': request } # include any request specific scoping or information from the request # first, look for default ORB context for all requests try: default_context = request.orb_default_context # then, look for scope specific information for all requests except AttributeError: try: query_context['scope'].update(request.orb_scope) except AttributeError: pass # if request specific context defaults exist, then # merge them with the rest of the query context else: if 'scope' in 
default_context: query_context['scope'].update(default_context.pop('scope')) # setup defaults based on the request for k, v in default_context.items(): query_context.setdefault(k, v) orb_context.update(query_context) return schema_values, orb_context
Handles real - time updates to the order book.
def _real_time_thread(self):
    """Handles real-time updates to the order book."""
    # map message types to their handlers; anything else is ignored
    dispatch = {
        'open': self._handle_open,
        'match': self._handle_match,
        'done': self._handle_done,
        'change': self._handle_change,
    }
    while self.ws_client.connected():
        if self.die:
            break

        if self.pause:
            sleep(5)
            continue

        message = self.ws_client.receive()
        if message is None:
            break

        message_type = message['type']
        # error messages are filtered out before the sequence check —
        # presumably they may not carry a sequence number
        if message_type == 'error':
            continue
        # drop stale or duplicate messages
        if message['sequence'] <= self.sequence:
            continue

        handler = dispatch.get(message_type)
        if handler is not None:
            handler(message)

    self.ws_client.disconnect()
Used exclusively as a thread which keeps the WebSocket alive.
def _keep_alive_thread(self):
    """Used exclusively as a thread which keeps the WebSocket alive."""
    while True:
        with self._lock:
            if not self.connected():
                # the socket is gone: clean up and let the thread die so
                # connect() can spawn a fresh one later
                self.disconnect()
                self._thread = None
                return
            self._ws.ping()
        # ping every 30 seconds; sleep outside the lock so other threads
        # are not blocked in the meantime
        sleep(30)
Connects and subscribes to the WebSocket Feed.
def connect(self):
    """Connects and subscribes to the WebSocket Feed."""
    if self.connected():
        return
    self._ws = create_connection(self.WS_URI)
    subscription = {
        'type': self.WS_TYPE,
        'product_id': self.WS_PRODUCT_ID
    }
    self._ws.send(dumps(subscription))

    # There will be only one keep alive thread per client instance
    with self._lock:
        if not self._thread:
            keep_alive = Thread(target=self._keep_alive_thread, args=[])
            keep_alive.start()
Marks a view function as being exempt from the cached httpbl view protection.
def cached_httpbl_exempt(view_func):
    """
    Marks a view function as being exempt from the cached httpbl view protection.
    """
    # Wrapping instead of mutating view_func keeps the decorator free of
    # side effects on the original callable.
    def wrapped_view(*args, **kwargs):
        return view_func(*args, **kwargs)

    # set the marker before wraps() merges view_func's __dict__ in, so a
    # pre-existing attribute on view_func keeps precedence (same order as
    # the original implementation)
    wrapped_view.cached_httpbl_exempt = True
    decorate = wraps(view_func, assigned=available_attrs(view_func))
    return decorate(wrapped_view)
Hook point for overriding how the CounterPool gets its connection to AWS.
def get_conn(self, aws_access_key=None, aws_secret_key=None):
    '''
    Hook point for overriding how the CounterPool gets its connection to
    AWS.
    '''
    credentials = {
        'aws_access_key_id': aws_access_key,
        'aws_secret_access_key': aws_secret_key,
    }
    return boto.connect_dynamodb(**credentials)
Hook point for overriding how the CounterPool determines the schema to be used when creating a missing table.
def get_schema(self):
    '''
    Hook point for overriding how the CounterPool determines the schema
    to be used when creating a missing table.
    '''
    schema = self.schema
    if not schema:
        raise NotImplementedError(
            'You must provide a schema value or override the get_schema method'
        )
    return self.conn.create_schema(**schema)
Hook point for overriding how the CounterPool creates a new table in DynamoDB
def create_table(self):
    '''
    Hook point for overriding how the CounterPool creates a new table
    in DynamoDB
    '''
    new_table = self.conn.create_table(
        name=self.get_table_name(),
        schema=self.get_schema(),
        read_units=self.get_read_units(),
        write_units=self.get_write_units(),
    )
    # block until the freshly created table is usable
    if new_table.status != 'ACTIVE':
        new_table.refresh(wait_for_active=True, retry_seconds=1)
    return new_table
Hook point for overriding how the CounterPool transforms table_name into a boto DynamoDB Table object.
def get_table(self):
    '''
    Hook point for overriding how the CounterPool transforms table_name
    into a boto DynamoDB Table object.
    '''
    # serve the cached table when one has already been resolved
    try:
        return self._table
    except AttributeError:
        pass

    try:
        table = self.conn.get_table(self.get_table_name())
    except boto.exception.DynamoDBResponseError:
        if not self.auto_create_table:
            raise
        table = self.create_table()

    self._table = table
    return table
Hook point for overriding how the CounterPool creates a DynamoDB item for a given counter when an existing item can't be found.
def create_item(self, hash_key, start=0, extra_attrs=None):
    '''
    Hook point for overriding how the CounterPool creates a DynamoDB item
    for a given counter when an existing item can't be found.
    '''
    table = self.get_table()
    # timestamps are second-resolution ISO strings; created_on and
    # modified_on start out identical for a brand-new item
    now = datetime.utcnow().replace(microsecond=0).isoformat()
    attrs = {
        'created_on': now,
        'modified_on': now,
        'count': start,
    }
    if extra_attrs:
        attrs.update(extra_attrs)
    return table.new_item(
        hash_key=hash_key,
        attrs=attrs,
    )
Hook point for overriding how the CounterPool fetches a DynamoDB item for a given counter.
def get_item(self, hash_key, start=0, extra_attrs=None):
    '''
    Hook point for overriding how the CounterPool fetches a DynamoDB item
    for a given counter.
    '''
    table = self.get_table()
    try:
        return table.get_item(hash_key=hash_key)
    except DynamoDBKeyNotFoundError:
        # no existing item for this counter: lazily create one
        return self.create_item(
            hash_key=hash_key,
            start=start,
            extra_attrs=extra_attrs,
        )
Gets the DynamoDB item behind a counter and ties it to a Counter instance.
def get_counter(self, name, start=0):
    '''
    Gets the DynamoDB item behind a counter and ties it to a Counter
    instance.
    '''
    backing_item = self.get_item(hash_key=name, start=start)
    return Counter(dynamo_item=backing_item, pool=self)
Matches this descriptor to another descriptor exactly. Args: descriptor: another descriptor to match this one. Returns: True if descriptors match or False otherwise.
def exact_match(self, descriptor):
    """
    Matches this descriptor to another descriptor exactly.

    Args:
        descriptor: another descriptor to match this one.

    Returns: True if descriptors match or False otherwise.
    """
    # Bug fix: the type comparison previously called the non-existent
    # self._exact_atch_field (typo), which raised AttributeError at runtime.
    return self._exact_match_field(self._group, descriptor.get_group()) \
        and self._exact_match_field(self._type, descriptor.get_type()) \
        and self._exact_match_field(self._kind, descriptor.get_kind()) \
        and self._exact_match_field(self._name, descriptor.get_name()) \
        and self._exact_match_field(self._version, descriptor.get_version())
Use an event to build a many - to - one relationship on a class.
def many_to_one(clsname, **kw):
    """Use an event to build a many-to-one relationship on a class.

    This makes use of the :meth:`.References._reference_table` method
    to generate a full foreign key relationship to the remote table.
    """
    def m2o(cls):
        # register the foreign key reference before building the relationship
        cls._references((cls.__name__, clsname))
        return relationship(clsname, **kw)
    return declared_attr(m2o)